69#include "llvm/IR/IntrinsicsPowerPC.h"
104#define DEBUG_TYPE "ppc-lowering"
146 bool isPPC64 = Subtarget.
isPPC64();
206 if (!Subtarget.
hasSPE()) {
225 if (isPPC64 || Subtarget.
hasFPCVT()) {
383 !(TM.Options.UnsafeFPMath && Subtarget.
hasFRSQRTE() &&
388 !(TM.Options.UnsafeFPMath && Subtarget.
hasFRSQRTES() &&
497 if (TM.Options.UnsafeFPMath) {
719 if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
947 if (TM.Options.UnsafeFPMath) {
1243 if (Subtarget.
hasMMA()) {
1422 VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1424 else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1433 for (
auto *EltTy : STy->elements()) {
1453 return Alignment.
value();
1461 return Subtarget.
hasSPE();
1483 return "PPCISD::FP_TO_UINT_IN_VSR,";
1485 return "PPCISD::FP_TO_SINT_IN_VSR";
1489 return "PPCISD::FTSQRT";
1491 return "PPCISD::FSQRT";
1496 return "PPCISD::XXSPLTI_SP_TO_DP";
1498 return "PPCISD::XXSPLTI32DX";
1533 return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1535 return "PPCISD::ANDI_rec_1_EQ_BIT";
1537 return "PPCISD::ANDI_rec_1_GT_BIT";
1552 return "PPCISD::ST_VSR_SCAL_INT";
1577 return "PPCISD::PADDI_DTPREL";
1594 return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1596 return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1604 return "PPCISD::STRICT_FADDRTZ";
1606 return "PPCISD::STRICT_FCTIDZ";
1608 return "PPCISD::STRICT_FCTIWZ";
1610 return "PPCISD::STRICT_FCTIDUZ";
1612 return "PPCISD::STRICT_FCTIWUZ";
1614 return "PPCISD::STRICT_FCFID";
1616 return "PPCISD::STRICT_FCFIDU";
1618 return "PPCISD::STRICT_FCFIDS";
1620 return "PPCISD::STRICT_FCFIDUS";
1646 return CFP->getValueAPF().isZero();
1651 return CFP->getValueAPF().isZero();
1659 return Op < 0 || Op == Val;
1671 if (ShuffleKind == 0) {
1674 for (
unsigned i = 0;
i != 16; ++
i)
1677 }
else if (ShuffleKind == 2) {
1680 for (
unsigned i = 0;
i != 16; ++
i)
1683 }
else if (ShuffleKind == 1) {
1684 unsigned j =
IsLE ? 0 : 1;
1685 for (
unsigned i = 0;
i != 8; ++
i)
1702 if (ShuffleKind == 0) {
1705 for (
unsigned i = 0;
i != 16;
i += 2)
1709 }
else if (ShuffleKind == 2) {
1712 for (
unsigned i = 0;
i != 16;
i += 2)
1716 }
else if (ShuffleKind == 1) {
1717 unsigned j =
IsLE ? 0 : 2;
1718 for (
unsigned i = 0;
i != 8;
i += 2)
1744 if (ShuffleKind == 0) {
1747 for (
unsigned i = 0;
i != 16;
i += 4)
1753 }
else if (ShuffleKind == 2) {
1756 for (
unsigned i = 0;
i != 16;
i += 4)
1762 }
else if (ShuffleKind == 1) {
1763 unsigned j =
IsLE ? 0 : 4;
1764 for (
unsigned i = 0;
i != 8;
i += 4)
1784 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1785 "Unsupported merge size!");
1787 for (
unsigned i = 0;
i != 8/UnitSize; ++
i)
1788 for (
unsigned j = 0;
j != UnitSize; ++
j) {
1807 if (ShuffleKind == 1)
1809 else if (ShuffleKind == 2)
1814 if (ShuffleKind == 1)
1816 else if (ShuffleKind == 0)
1832 if (ShuffleKind == 1)
1834 else if (ShuffleKind == 2)
1839 if (ShuffleKind == 1)
1841 else if (ShuffleKind == 0)
1895 for (
unsigned i = 0;
i < 2; ++
i)
1896 for (
unsigned j = 0;
j < 4; ++
j)
1923 if (ShuffleKind == 1)
1925 else if (ShuffleKind == 2)
1932 if (ShuffleKind == 1)
1934 else if (ShuffleKind == 0)
1957 for (
i = 0;
i != 16 &&
SVOp->getMaskElt(
i) < 0; ++
i)
1960 if (
i == 16)
return -1;
1964 unsigned ShiftAmt =
SVOp->getMaskElt(
i);
1965 if (ShiftAmt <
i)
return -1;
1970 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1972 for (++
i;
i != 16; ++
i)
1975 }
else if (ShuffleKind == 1) {
1977 for (++
i;
i != 16; ++
i)
1984 ShiftAmt = 16 - ShiftAmt;
1994 EltSize <= 8 &&
"Can only handle 1,2,4,8 byte element sizes");
1998 if (
N->getMaskElt(0) % EltSize != 0)
2011 for (
unsigned i = 1;
i != EltSize; ++
i)
2015 for (
unsigned i = EltSize, e = 16;
i != e;
i += EltSize) {
2016 if (
N->getMaskElt(
i) < 0)
continue;
2017 for (
unsigned j = 0;
j != EltSize; ++
j)
2018 if (
N->getMaskElt(
i+
j) !=
N->getMaskElt(
j))
2035 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2036 "Unexpected element width.");
2049 for (
unsigned int j = 1;
j < Width; ++
j) {
2066 unsigned M0 =
N->getMaskElt(0) / 4;
2067 unsigned M1 =
N->getMaskElt(4) / 4;
2068 unsigned M2 =
N->getMaskElt(8) / 4;
2069 unsigned M3 =
N->getMaskElt(12) / 4;
2076 if ((
M0 > 3 &&
M1 == 1 &&
M2 == 2 &&
M3 == 3) ||
2077 (
M0 < 4 &&
M1 == 5 &&
M2 == 6 &&
M3 == 7)) {
2084 if ((
M1 > 3 &&
M0 == 0 &&
M2 == 2 &&
M3 == 3) ||
2085 (
M1 < 4 &&
M0 == 4 &&
M2 == 6 &&
M3 == 7)) {
2092 if ((
M2 > 3 &&
M0 == 0 &&
M1 == 1 &&
M3 == 3) ||
2093 (
M2 < 4 &&
M0 == 4 &&
M1 == 5 &&
M3 == 7)) {
2100 if ((
M3 > 3 &&
M0 == 0 &&
M1 == 1 &&
M2 == 2) ||
2101 (
M3 < 4 &&
M0 == 4 &&
M1 == 5 &&
M2 == 6)) {
2110 if (
N->getOperand(1).isUndef()) {
2136 bool &Swap,
bool IsLE) {
2143 unsigned M0 =
N->getMaskElt(0) / 4;
2144 unsigned M1 =
N->getMaskElt(4) / 4;
2145 unsigned M2 =
N->getMaskElt(8) / 4;
2146 unsigned M3 =
N->getMaskElt(12) / 4;
2150 if (
N->getOperand(1).isUndef()) {
2151 assert(
M0 < 4 &&
"Indexing into an undef vector?");
2152 if (
M1 != (
M0 + 1) % 4 ||
M2 != (
M1 + 1) % 4 ||
M3 != (
M2 + 1) % 4)
2161 if (
M1 != (
M0 + 1) % 8 ||
M2 != (
M1 + 1) % 8 ||
M3 != (
M2 + 1) % 8)
2165 if (
M0 == 0 ||
M0 == 7 ||
M0 == 6 ||
M0 == 5) {
2171 }
else if (
M0 == 4 ||
M0 == 3 ||
M0 == 2 ||
M0 == 1) {
2181 if (
M0 == 0 ||
M0 == 1 ||
M0 == 2 ||
M0 == 3) {
2186 }
else if (
M0 == 4 ||
M0 == 5 ||
M0 == 6 ||
M0 == 7) {
2203 for (
int i = 0;
i < 16;
i += Width)
2204 if (
N->getMaskElt(
i) !=
i + Width - 1)
2235 bool &Swap,
bool IsLE) {
2242 unsigned M0 =
N->getMaskElt(0) / 8;
2243 unsigned M1 =
N->getMaskElt(8) / 8;
2244 assert(((
M0 |
M1) < 4) &&
"A mask element out of bounds?");
2248 if (
N->getOperand(1).isUndef()) {
2249 if ((
M0 |
M1) < 2) {
2250 DM =
IsLE ? (((~M1) & 1) << 1) + ((~
M0) & 1) : (
M0 << 1) + (
M1 & 1);
2258 if (
M0 > 1 &&
M1 < 2) {
2268 DM = (((~M1) & 1) << 1) + ((~
M0) & 1);
2273 }
else if (
M0 > 1 &&
M1 < 2) {
2281 DM = (
M0 << 1) + (
M1 & 1);
2295 return (16 / EltSize) - 1 - (
SVOp->getMaskElt(0) / EltSize);
2297 return SVOp->getMaskElt(0) / EltSize;
2311 unsigned EltSize = 16/
N->getNumOperands();
2312 if (EltSize < ByteSize) {
2313 unsigned Multiple = ByteSize/EltSize;
2318 for (
unsigned i = 0, e =
N->getNumOperands();
i != e; ++
i) {
2319 if (
N->getOperand(
i).isUndef())
continue;
2363 for (
unsigned i = 0, e =
N->getNumOperands();
i != e; ++
i) {
2364 if (
N->getOperand(
i).isUndef())
continue;
2365 if (!
OpVal.getNode())
2367 else if (
OpVal !=
N->getOperand(
i))
2378 assert(
CN->getValueType(0) ==
MVT::f32 &&
"Only one legal FP vector type!");
2488 }
else if (
N.getOpcode() ==
ISD::OR) {
2498 if (
LHSKnown.Zero.getBoolValue()) {
2583 }
else if (
N.getOperand(1).getOpcode() ==
PPCISD::Lo) {
2586 &&
"Cannot handle constant offsets yet!");
2595 }
else if (
N.getOpcode() ==
ISD::OR) {
2628 CN->getValueType(0));
2634 (int64_t)
CN->getZExtValue() == (
int)
CN->getZExtValue()) &&
2637 int Addr = (int)
CN->getZExtValue();
2644 unsigned Opc =
CN->getValueType(0) ==
MVT::i32 ? PPC::LIS : PPC::LIS8;
2689 if ((
LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0
ULL)
2727 !
N.getOperand(1).hasOneUse() || !
N.getOperand(0).hasOneUse())) {
2773 if (!
MemVT.isSimple())
2775 switch(
MemVT.getSimpleVT().SimpleTy) {
2779 if (!ST.hasP8Vector())
2784 if (!ST.hasP9Vector())
2797 if (UI.getUse().get().getResNo() == 0 &&
2819 Ptr = LD->getBasePtr();
2820 VT = LD->getMemoryVT();
2821 Alignment = LD->getAlignment();
2823 Ptr = ST->getBasePtr();
2824 VT = ST->getMemoryVT();
2825 Alignment = ST->getAlignment();
2938 const bool Is64Bit = Subtarget.
isPPC64();
2969 return getTOCEntry(DAG,
SDLoc(CP),
GA);
2979 return getTOCEntry(DAG,
SDLoc(CP),
GA);
3057 return getTOCEntry(DAG,
SDLoc(JT),
GA);
3126 bool is64bit = Subtarget.
isPPC64();
3174 if (!
TM.isPositionIndependent())
3270 return getTOCEntry(DAG,
DL,
GA);
3281 return getTOCEntry(DAG,
DL,
GA);
3327 if (
C->isAllOnesValue() ||
C->isNullValue())
3338 EVT VT =
Op.getValueType();
3348 EVT VT =
Node->getValueType(0);
3401 DAG.getConstant(8, dl,
MVT::i32), ISD::
SETLT);
3406 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3421 DAG.getConstant(VT ==
MVT::i64 ? 2 : 1, dl,
3446 assert(!Subtarget.
isPPC64() &&
"LowerVACOPY is PPC32 only");
3450 return DAG.
getMemcpy(
Op.getOperand(0), Op,
Op.getOperand(1),
Op.getOperand(2),
3461 return Op.getOperand(0);
3482 Entry.
Ty = IntPtrTy;
3483 Entry.Node =
Trmp;
Args.push_back(Entry);
3486 Entry.Node = DAG.
getConstant(isPPC64 ? 48 : 40, dl,
3487 isPPC64 ?
MVT::i64 :
MVT::i32);
3488 Args.push_back(Entry);
3490 Entry.Node =
FPtr;
Args.push_back(Entry);
3491 Entry.Node =
Nest;
Args.push_back(Entry);
3495 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3550 uint64_t FrameOffset =
PtrVT.getSizeInBits()/8;
3556 uint64_t FPROffset = 1;
3589static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3590 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3591 PPC::F11, PPC::F12, PPC::F13};
3598 if (Flags.isByVal())
3599 ArgSize = Flags.getByValSize();
3603 if (!Flags.isInConsecutiveRegs())
3621 Alignment =
Align(16);
3624 if (Flags.isByVal()) {
3625 auto BVAlign = Flags.getNonZeroByValAlign();
3629 "ByVal alignment is not a multiple of the pointer size");
3636 if (Flags.isInConsecutiveRegs()) {
3671 if (Flags.isInConsecutiveRegsLast())
3680 if (!Flags.isByVal()) {
3702 unsigned NumBytes) {
3706SDValue PPCTargetLowering::LowerFormalArguments(
3711 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3714 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3717 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3721SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3772 CCInfo.AllocateStack(LinkageSize,
PtrAlign);
3774 CCInfo.PreAnalyzeFormalArguments(Ins);
3777 CCInfo.clearWasPPCF128();
3779 for (
unsigned i = 0, e =
ArgLocs.size();
i !=
e; ++
i) {
3783 if (
VA.isRegLoc()) {
3785 EVT ValVT =
VA.getValVT();
3792 RC = &PPC::GPRCRegClass;
3796 RC = &PPC::VSSRCRegClass;
3797 else if (Subtarget.
hasSPE())
3798 RC = &PPC::GPRCRegClass;
3800 RC = &PPC::F4RCRegClass;
3804 RC = &PPC::VSFRCRegClass;
3805 else if (Subtarget.
hasSPE())
3807 RC = &PPC::GPRCRegClass;
3809 RC = &PPC::F8RCRegClass;
3814 RC = &PPC::VRRCRegClass;
3817 RC = &PPC::VRRCRegClass;
3821 RC = &PPC::VRRCRegClass;
3829 assert(
i + 1 < e &&
"No second half of double precision argument");
3846 InVals.push_back(ArgValue);
3852 unsigned ArgSize =
VA.getLocVT().getStoreSize();
3854 unsigned ObjSize =
VA.getValVT().getStoreSize();
3880 unsigned MinReservedArea =
CCByValInfo.getNextStackOffset();
3881 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3897 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3898 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3903 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3920 CCInfo.getNextStackOffset(),
true));
3976 const SDLoc &dl)
const {
3980 else if (Flags.isZExt())
3987SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4000 "fastcc not supported on varargs functions");
4010 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4011 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4014 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4015 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4030 unsigned NumBytes = LinkageSize;
4033 for (
unsigned i = 0, e =
Ins.size();
i !=
e; ++
i) {
4034 if (Ins[
i].Flags.isNest())
4052 for (
unsigned ArgNo = 0, e =
Ins.size(); ArgNo !=
e; ++ArgNo) {
4060 if (Ins[ArgNo].isOrigArg()) {
4087 if (Flags.isByVal()) {
4088 assert(Ins[ArgNo].isOrigArg() &&
"Byval arguments cannot be implicit");
4094 ObjSize = Flags.getByValSize();
4106 InVals.push_back(
FIN);
4129 if (!isLittleEndian) {
4133 InVals.push_back(
Arg);
4164 InVals.push_back(
FIN);
4188 switch (
ObjectVT.getSimpleVT().SimpleTy) {
4193 if (Flags.isNest()) {
4195 unsigned VReg = MF.
addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4238 ? &PPC::VSSRCRegClass
4242 ? &PPC::VSFRCRegClass
4261 DAG.getConstant(32, dl,
MVT::i32));
4279 if (Flags.isInConsecutiveRegsLast())
4318 InVals.push_back(
ArgVal);
4322 unsigned MinReservedArea;
4326 MinReservedArea = LinkageSize;
4374 unsigned ParamSize) {
4376 if (!isTailCall)
return 0;
4397 "PC Relative callers do not have a TOC and cannot share a TOC Base");
4413 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4438 if (
STICallee->isUsingPCRelativeCalls())
4458 if (TM.getFunctionSections() || GV->
hasComdat() || Caller->hasComdat() ||
4462 if (
F->getSectionPrefix() != Caller->getSectionPrefix())
4478 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4479 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4482 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4483 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4491 unsigned NumBytes = LinkageSize;
4496 if (Param.Flags.isNest())
continue;
4553bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4562 if (isVarArg)
return false;
4641PPCTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
4657 for (
unsigned i = 0;
i !=
Ins.size();
i++) {
4659 if (Flags.isByVal())
return false;
4669 return G->getGlobal()->hasHiddenVisibility()
4670 ||
G->getGlobal()->hasProtectedVisibility();
4680 if (!
C)
return nullptr;
4682 int Addr =
C->getZExtValue();
4683 if ((
Addr & 3) != 0 ||
4689 (
int)
C->getZExtValue() >> 2,
SDLoc(Op),
4696struct TailCallArgumentInfo {
4701 TailCallArgumentInfo() =
default;
4732 bool isPPC64 = Subtarget.
isPPC64();
4733 int SlotSize = isPPC64 ? 8 : 4;
4756 TailCallArgumentInfo
Info;
4758 Info.FrameIdxOp =
FIN;
4766SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4772 LROpOut = getReturnAddrFrameIndex(DAG);
4790 Flags.getNonZeroByValAlign(),
false,
false,
false,
4851 return G->getGlobal()->getValueType()->isFunctionTy();
4857SDValue PPCTargetLowering::LowerCallResult(
4871 for (
unsigned i = 0, e =
RVLocs.size();
i !=
e; ++
i) {
4873 assert(
VA.isRegLoc() &&
"Can only return in registers!");
4880 Chain =
Lo.getValue(1);
4885 Chain =
Hi.getValue(1);
4897 switch (
VA.getLocInfo()) {
4915 InVals.push_back(Val);
5036 const char *SymName = S->getSymbol();
5054 return Sec->getQualNameSymbol();
5070 "Expected a CALLSEQ_STARTSDNode.");
5145 const unsigned Alignment = Subtarget.
isPPC64() ? 8 : 4;
5149 Alignment, MMOFlags);
5169 Chain =
TOCVal.getValue(0);
5170 Glue =
TOCVal.getValue(1);
5175 "Nest parameter is not supported on AIX.");
5178 Chain =
EnvVal.getValue(0);
5179 Glue =
EnvVal.getValue(1);
5194 const bool IsPPC64 = Subtarget.
isPPC64();
5199 Ops.push_back(Chain);
5205 assert(!
CFlags.IsPatchPoint &&
"Patch point calls are not indirect.");
5234 Ops.push_back(DAG.
getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5262 assert(Mask &&
"Missing call preserved mask for calling convention");
5267 Ops.push_back(Glue);
5270SDValue PPCTargetLowering::FinishCall(
5289 dl,
CFlags.HasNest, Subtarget);
5308 "Expecting a global address, external symbol, absolute value, "
5309 "register or an indirect tail call when PC Relative calls are "
5313 "Unexpected call opcode for a tail call.");
5336 return LowerCallResult(Chain, Glue,
CFlags.CallConv,
CFlags.IsVarArg, Ins, dl,
5360 isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5361 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5363 isTailCall = IsEligibleForTailCallOptimization(
Callee, CallConv, isVarArg,
5378 "Callee should be an llvm::Function object.");
5381 <<
"\nTCO callee: ");
5388 "site marked musttail");
5406 return LowerCall_AIX(Chain,
Callee,
CFlags, Outs, OutVals, Ins, dl, DAG,
5411 return LowerCall_64SVR4(Chain,
Callee,
CFlags, Outs, OutVals, Ins, dl, DAG,
5413 return LowerCall_32SVR4(Chain,
Callee,
CFlags, Outs, OutVals, Ins, dl, DAG,
5417SDValue PPCTargetLowering::LowerCall_32SVR4(
5428 const bool IsVarArg =
CFlags.IsVarArg;
5429 const bool IsTailCall =
CFlags.IsTailCall;
5460 CCInfo.PreAnalyzeCallOperands(Outs);
5466 unsigned NumArgs = Outs.size();
5468 for (
unsigned i = 0;
i != NumArgs; ++
i) {
5469 MVT ArgVT = Outs[
i].VT;
5473 if (Outs[
i].IsFixed) {
5483 errs() <<
"Call operand #" <<
i <<
" has unhandled type "
5493 CCInfo.clearWasPPCF128();
5507 unsigned NumBytes =
CCByValInfo.getNextStackOffset();
5521 Chain = EmitTailCallLoadFPAndRetAddr(DAG,
SPDiff, Chain,
LROp,
FPOp, dl);
5544 if (Flags.isByVal()) {
5588 if (
VA.isRegLoc()) {
5656SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5663 int64_t FrameSize =
CallSeqStart.getConstantOperandVal(1);
5671SDValue PPCTargetLowering::LowerCall_64SVR4(
5680 unsigned NumOps = Outs.size();
5701 "fastcc not supported on varargs functions");
5708 unsigned NumBytes = LinkageSize;
5712 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5713 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5716 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5717 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5734 for (
unsigned i = 0;
i !=
NumOps; ++
i) {
5735 if (Outs[
i].Flags.isNest())
continue;
5753 for (
unsigned i = 0;
i !=
NumOps; ++
i) {
5755 EVT ArgVT = Outs[
i].VT;
5762 if (Flags.isByVal()) {
5805 if (Flags.isInConsecutiveRegsLast())
5820 NumBytes = std::max(NumBytes, LinkageSize + 8 *
PtrByteSize);
5822 NumBytes = LinkageSize;
5849 Chain = EmitTailCallLoadFPAndRetAddr(DAG,
SPDiff, Chain,
LROp,
FPOp, dl);
5866 for (
unsigned i = 0;
i !=
NumOps; ++
i) {
5869 EVT ArgVT = Outs[
i].VT;
5908 if (Flags.isByVal()) {
5914 unsigned Size = Flags.getByValSize();
5940 if (!isLittleEndian) {
5980 if (!isLittleEndian) {
6018 switch (
Arg.getSimpleValueType().SimpleTy) {
6023 if (Flags.isNest()) {
6039 "Parameter area must exist to pass an argument in memory.");
6084 }
else if (!Flags.isInConsecutiveRegs()) {
6094 if (!isLittleEndian)
6099 }
else if (Flags.isInConsecutiveRegsLast()) {
6102 if (!isLittleEndian)
6120 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6126 "Parameter area must exist to pass an argument in memory.");
6138 Flags.isInConsecutiveRegs()) ? 4 : 8;
6139 if (Flags.isInConsecutiveRegsLast())
6162 "Parameter area must exist if we have a varargs call.");
6196 "Parameter area must exist to pass an argument in memory.");
6211 "mismatch in size of parameter area");
6224 assert(!
CFlags.IsTailCall &&
"Indirect tails calls not supported");
6239 if (isELFv2ABI && !
CFlags.IsPatchPoint)
6265 State.getMachineFunction().getSubtarget());
6266 const bool IsPPC64 = Subtarget.
isPPC64();
6270 if (ValVT.
isVector() && !State.getMachineFunction()
6272 .Options.EnableAIXExtendedAltivecABI)
6282 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6283 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6285 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6286 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6289 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6290 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6291 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6296 "register width are not supported.");
6304 State.getNextStackOffset(), RegVT,
6313 if (
unsigned Reg = State.AllocateReg(IsPPC64 ?
GPR_64 :
GPR_32))
6331 assert(IsPPC64 &&
"PPC32 should have split i64 values.");
6340 if (
unsigned Reg = State.AllocateReg(IsPPC64 ?
GPR_64 :
GPR_32))
6354 State.AllocateStack(IsPPC64 ? 8 : StoreSize,
Align(4));
6355 unsigned FReg = State.AllocateReg(
FPR);
6360 for (
unsigned I = 0;
I < StoreSize;
I +=
PtrAlign.value()) {
6361 if (
unsigned Reg = State.AllocateReg(IsPPC64 ?
GPR_64 :
GPR_32)) {
6362 assert(FReg &&
"An FPR should be available when a GPR is reserved.");
6363 if (State.isVarArg()) {
6395 if (State.isVarArg())
6397 "variadic arguments for vector types are unimplemented for AIX");
6399 if (
unsigned VReg = State.AllocateReg(VR))
6403 "passing vector parameters to the stack is unimplemented for AIX");
6414 "i64 should have been split for 32-bit codegen.");
6422 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6424 return &PPC::F4RCRegClass;
6426 return &PPC::F8RCRegClass;
6434 return &PPC::VRRCRegClass;
6447 else if (Flags.isZExt())
6455 const unsigned LASize =
FL->getLinkageSize();
6457 if (PPC::GPRCRegClass.
contains(Reg)) {
6458 assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6459 "Reg must be a valid argument register!");
6460 return LASize + 4 * (Reg - PPC::R3);
6463 if (PPC::G8RCRegClass.
contains(Reg)) {
6464 assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6465 "Reg must be a valid argument register!");
6466 return LASize + 8 * (Reg - PPC::X3);
6512SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6519 "Unexpected calling convention!");
6530 const bool IsPPC64 = Subtarget.
isPPC64();
6544 CCInfo.AnalyzeFormalArguments(Ins,
CC_AIX);
6548 for (
size_t I = 0, End =
ArgLocs.size();
I != End; ) {
6550 MVT LocVT =
VA.getLocVT();
6552 if (
VA.isMemLoc() &&
VA.getValVT().isVector())
6554 "passing vector parameters to the stack is unimplemented for AIX");
6562 if (
VA.isMemLoc() &&
VA.needsCustom())
6565 if (
VA.isRegLoc()) {
6566 if (
VA.getValVT().isScalarInteger())
6568 else if (
VA.getValVT().isFloatingPoint() && !
VA.getValVT().isVector())
6574 if (Flags.isByVal() &&
VA.isMemLoc()) {
6575 const unsigned Size =
6579 Size,
VA.getLocMemOffset(),
false,
6582 InVals.push_back(
FIN);
6587 if (Flags.isByVal()) {
6588 assert(
VA.isRegLoc() &&
"MemLocs should already be handled.");
6601 InVals.push_back(
FIN);
6605 IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6609 const unsigned VReg = MF.
addLiveIn(PhysReg, RegClass);
6620 CopyFrom.
getValue(1), dl, CopyFrom,
6633 "RegLocs should be for ByVal argument.");
6640 if (
Offset != StackSize) {
6642 "Expected MemLoc for remaining bytes.");
6643 assert(
ArgLocs[
I].isMemLoc() &&
"Expected MemLoc for remaining bytes.");
6652 EVT ValVT =
VA.getValVT();
6653 if (
VA.isRegLoc() && !
VA.needsCustom()) {
6663 InVals.push_back(ArgValue);
6666 if (
VA.isMemLoc()) {
6670 "Object size is larger than size of MemLoc");
6676 const bool IsImmutable =
6683 InVals.push_back(ArgValue);
6707 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6708 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6710 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6711 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6718 (CCInfo.getNextStackOffset() - LinkageSize) /
PtrByteSize;
6721 const unsigned VReg =
6741SDValue PPCTargetLowering::LowerCall_AIX(
6754 "Unexpected calling convention!");
6772 const bool IsPPC64 = Subtarget.
isPPC64();
6776 CCInfo.AnalyzeCallOperands(Outs,
CC_AIX);
6786 CCInfo.getNextStackOffset());
6802 for (
unsigned I = 0,
E =
ArgLocs.size();
I !=
E;) {
6803 const unsigned ValNo =
ArgLocs[
I].getValNo();
6807 if (Flags.isByVal()) {
6808 const unsigned ByValSize = Flags.getByValSize();
6834 "Unexpected location for pass-by-value argument.");
6843 "Expected additional location for by-value argument.");
6855 DAG.getObjectPtrOffset(dl,
StackPtr,
6868 "Unexpected register residue for by-value argument.");
6883 assert(
PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
6884 "Unexpected load emitted during handling of pass-by-value "
6886 unsigned NumSHLBits =
PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
6903 const MVT LocVT =
VA.getLocVT();
6904 const MVT ValVT =
VA.getValVT();
6906 if (
VA.isMemLoc() &&
VA.getValVT().isVector())
6908 "passing vector parameters to the stack is unimplemented for AIX");
6910 switch (
VA.getLocInfo()) {
6923 if (
VA.isRegLoc() && !
VA.needsCustom()) {
6928 if (
VA.isMemLoc()) {
6942 "Unexpected register handling for calling convention.");
6951 else if (
Arg.getValueType().getFixedSizeInBits() <
6960 "Unexpected custom register for argument!");
6972 assert(
PeekArg.needsCustom() &&
"A second custom GPR is expected.");
6987 assert(!
CFlags.IsTailCall &&
"Indirect tail-calls not supported.");
6991 const unsigned TOCSaveOffset =
7024 return CCInfo.CheckReturn(
7039 CCInfo.AnalyzeReturn(Outs,
7050 assert(
VA.isRegLoc() &&
"Can only return in registers!");
7054 switch (
VA.getLocInfo()) {
7096PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
SDValue Op,
7121 bool isPPC64 = Subtarget.
isPPC64();
7122 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7142 bool isPPC64 = Subtarget.
isPPC64();
7163PPCTargetLowering::getFramePointerFrameIndex(
SelectionDAG & DAG)
const {
7165 bool isPPC64 = Subtarget.
isPPC64();
7211 bool isPPC64 = Subtarget.
isPPC64();
7223 Op.getOperand(0),
Op.getOperand(1));
7230 Op.getOperand(0),
Op.getOperand(1));
7234 if (
Op.getValueType().isVector())
7235 return LowerVectorLoad(Op, DAG);
7238 "Custom lowering only for i1 loads");
7259 if (
Op.getOperand(1).getValueType().isVector())
7260 return LowerVectorStore(Op, DAG);
7263 "Custom lowering only for i1 stores");
7283 "Custom lowering only for i1 results");
7312 assert(
TrgVT.isVector() &&
"Vector type expected.");
7327 if (
SrcSize == 256 &&
SrcVT.getVectorNumElements() < 2)
7338 N1.getValueType().getHalfNumVectorElementsVT(*DAG.
getContext());
7374 if (!
Op.getOperand(0).getValueType().isFloatingPoint() ||
7375 !
Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.
hasSPE())
7382 SDValue LHS =
Op.getOperand(0), RHS =
Op.getOperand(1);
7420 if (LHS.getValueType() ==
MVT::f32)
7433 if (LHS.getValueType() ==
MVT::f32)
7442 if (LHS.getValueType() ==
MVT::f32)
7517 bool IsStrict = Op->isStrictFPOpcode();
7540 switch (Op.getSimpleValueType().SimpleTy) {
7548 "i64 FP_TO_UINT is supported only with FPCVT");
7554 {Chain, Src}, Flags);
7561void PPCTargetLowering::LowerFP_TO_INTForReuse(
SDValue Op, ReuseLoadInfo &
RLI,
7563 const SDLoc &dl)
const {
7571 (IsSigned || Subtarget.
hasFPCVT());
7582 Alignment =
Align(4);
7589 Chain = DAG.
getStore(Chain, dl, Tmp,
FIPtr, MPI, Alignment);
7602 RLI.Alignment = Alignment;
7610 const SDLoc &dl)
const {
7613 if (
Op->isStrictFPOpcode())
7620 const SDLoc &dl)
const {
7653 {Op.getOperand(0), Lo, Hi}, Flags);
7656 {Res.getValue(1), Res}, Flags);
7662 const uint64_t
TwoE31[] = {0x41e0000000000000LL, 0};
7678 Chain =
Sel.getValue(1);
7686 {Chain, Src, FltOfs}, Flags);
7690 {Chain, Val}, Flags);
7691 Chain =
SInt.getValue(1);
7712 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7715 LowerFP_TO_INTForReuse(Op,
RLI, DAG, dl);
7718 RLI.Alignment,
RLI.MMOFlags(),
RLI.AAInfo,
RLI.Ranges);
7734 if (
Op->isStrictFPOpcode())
7743 Op.getOperand(0).getValueType())) {
7745 LowerFP_TO_INTForReuse(Op,
RLI, DAG, dl);
7750 if (!LD ||
LD->getExtensionType() !=
ET ||
LD->isVolatile() ||
7751 LD->isNonTemporal())
7753 if (
LD->getMemoryVT() !=
MemVT)
7763 RLI.Ptr =
LD->getBasePtr();
7764 if (
LD->isIndexed() && !
LD->getOffset().isUndef()) {
7766 "Non-pre-inc AM on PPC?");
7771 RLI.Chain =
LD->getChain();
7772 RLI.MPI =
LD->getPointerInfo();
7773 RLI.IsDereferenceable =
LD->isDereferenceable();
7774 RLI.IsInvariant =
LD->isInvariant();
7775 RLI.Alignment =
LD->getAlign();
7776 RLI.AAInfo =
LD->getAAInfo();
7777 RLI.Ranges =
LD->getRanges();
7787void PPCTargetLowering::spliceIntoChain(
SDValue ResChain,
7798 "A new TF really is required here");
7807bool PPCTargetLowering::directMoveIsProfitable(
const SDValue &Op)
const {
7808 SDNode *Origin =
Op.getOperand(0).getNode();
7823 if (UI.getUse().get().getResNo() != 0)
7853 if (Op->isStrictFPOpcode()) {
7855 Chain = Op.getOperand(0);
7867 const SDLoc &dl)
const {
7870 "Invalid floating point type as target of conversion");
7872 "Int to FP conversions with direct moves require FPCVT");
7873 SDValue Src =
Op.getOperand(
Op->isStrictFPOpcode() ? 1 : 0);
7903 const SDLoc &dl)
const {
7905 unsigned Opc =
Op.getOpcode();
7909 "Unexpected conversion type");
7911 "Supports conversions to v2f64/v4f32 only.");
7957 {Op.getOperand(0), Extend}, Flags);
7977 if (
OutVT.isVector() &&
OutVT.isFloatingPoint() &&
7979 return LowerINT_TO_FPVector(Op, DAG, dl);
8001 if (Subtarget.
hasDirectMove() && directMoveIsProfitable(Op) &&
8003 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8006 "UINT_TO_FP is supported only with FPCVT");
8064 RLI.Alignment,
RLI.MMOFlags(),
RLI.AAInfo,
RLI.Ranges);
8065 spliceIntoChain(
RLI.ResChain,
Bits.getValue(1), DAG);
8075 spliceIntoChain(
RLI.ResChain,
Bits.getValue(1), DAG);
8085 spliceIntoChain(
RLI.ResChain,
Bits.getValue(1), DAG);
8103 "Expected an i32 store");
8118 Ops,
MVT::i32, MMO);
8119 Chain =
Bits.getValue(1);
8131 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8140 "Unhandled INT_TO_FP type in custom expander!");
8163 "Expected an i32 store");
8179 Chain =
Ld.getValue(1);
8181 spliceIntoChain(
RLI.ResChain,
Ld.getValue(1), DAG);
8184 "i32->FP without LFIWAX supported only on PPC64");
8201 Chain =
Ld.getValue(1);
8212 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8243 EVT VT =
Op.getValueType();
8249 Chain =
MFFS.getValue(1);
8263 "Stack slot adjustment is valid only on big endian subtargets!");
8267 Chain =
CWD.getValue(1);
8293 EVT VT =
Op.getValueType();
8297 VT ==
Op.getOperand(1).getValueType() &&
8322 EVT VT =
Op.getValueType();
8326 VT ==
Op.getOperand(1).getValueType() &&
8352 EVT VT =
Op.getValueType();
8355 VT ==
Op.getOperand(1).getValueType() &&
8382 EVT VT =
Op.getValueType();
8419 if (Val == ((1LLU << (
SplatSize * 8)) - 1)) {
8468 for (
unsigned i = 0;
i != 16; ++
i)
8489 EVT VecVT = V->getValueType(0);
8496 bool IsSplat =
true;
8497 bool IsLoad =
false;
8503 if (V->isConstant())
8505 for (
int i = 0, e = V->getNumOperands();
i < e; ++
i) {
8506 if (V->getOperand(
i).isUndef())
8510 if (V->getOperand(
i).getOpcode() ==
ISD::LOAD ||
8512 V->getOperand(
i).getOperand(0).getOpcode() ==
ISD::LOAD) ||
8514 V->getOperand(
i).getOperand(0).getOpcode() ==
ISD::LOAD) ||
8516 V->getOperand(
i).getOperand(0).getOpcode() ==
ISD::LOAD))
8520 if (V->getOperand(
i) != Op0 ||
8521 (!IsLoad && !V->isOnlyUserOf(V->getOperand(
i).getNode())))
8524 return !(IsSplat && IsLoad);
8595 assert(
BVN &&
"Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8599 unsigned SplatBitSize;
8659 unsigned ElementSize =
LD->getMemoryVT().getScalarSizeInBits();
8670 ((Subtarget.
hasVSX() && ElementSize == 64) ||
8671 (Subtarget.
hasP9Vector() && ElementSize == 32))) {
8679 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
8705 if (SplatBits == 0) {
8721 Op.getValueType(), DAG, dl);
8733 int32_t
SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8757 if (VT ==
Op.getValueType())
8766 if (
SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8780 static const signed char SplatCsts[] = {
8781 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8782 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8797 static const unsigned IIDs[] = {
8798 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8799 Intrinsic::ppc_altivec_vslw
8808 static const unsigned IIDs[] = {
8809 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8810 Intrinsic::ppc_altivec_vsrw
8820 static const unsigned IIDs[] = {
8821 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8822 Intrinsic::ppc_altivec_vrlw
8829 if (
SextVal == (
int)(((
unsigned)
i << 8) | (
i < 0 ? 0xFF : 0))) {
8835 if (
SextVal == (
int)(((
unsigned)
i << 16) | (
i < 0 ? 0xFFFF : 0))) {
8841 if (
SextVal == (
int)(((
unsigned)
i << 24) | (
i < 0 ? 0xFFFFFF : 0))) {
8856 unsigned OpNum = (
PFEntry >> 26) & 0x0F;
8874 if (
LHSID == (1*9+2)*9+3)
return LHS;
8875 assert(
LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
8899 for (
unsigned i = 0;
i != 16; ++
i)
8903 for (
unsigned i = 0;
i != 16; ++
i)
8907 for (
unsigned i = 0;
i != 16; ++
i)
8911 for (
unsigned i = 0;
i != 16; ++
i)
8943 0, 15, 14, 13, 12, 11, 10, 9};
8945 1, 2, 3, 4, 5, 6, 7, 8};
8948 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9151 auto ShuffleMask =
SVN->getMask();
9173 unsigned SplatBitSize;
9189 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9190 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9191 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9193 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9194 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9195 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9203 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9219 "Only set v1i128 as custom, other type shouldn't reach here!");
9223 unsigned SHLAmt =
N1.getConstantOperandVal(0);
9226 std::iota(
Mask.begin(),
Mask.end(), 0);
9261 V1 =
Op.getOperand(0);
9262 V2 =
Op.getOperand(1);
9264 EVT VT =
Op.getValueType();
9286 assert(isLittleEndian &&
"Unexpected permuted load on big endian target");
9289 "Splat of a value outside of the loaded memory");
9314 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
9316 if (
LdSplt.getValueType() !=
SVOp->getValueType(0))
9355 if (Subtarget.
hasVSX() &&
9368 if (Subtarget.
hasVSX() &&
9401 if (Subtarget.
hasVSX()) {
9446 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9470 for (
unsigned j = 0;
j != 4; ++
j) {
9501 unsigned Cost = (
PFEntry >> 30);
9520 if (
V2.isUndef())
V2 =
V1;
9529 EVT EltVT =
V1.getValueType().getVectorElementType();
9547 LLVM_DEBUG(
dbgs() <<
"Emitting a VPERM for the following shuffle:\n");
9549 LLVM_DEBUG(
dbgs() <<
"With the following permute control vector:\n");
9565 unsigned IntrinsicID =
9569 switch (IntrinsicID) {
9573 case Intrinsic::ppc_altivec_vcmpbfp_p:
9577 case Intrinsic::ppc_altivec_vcmpeqfp_p:
9581 case Intrinsic::ppc_altivec_vcmpequb_p:
9585 case Intrinsic::ppc_altivec_vcmpequh_p:
9589 case Intrinsic::ppc_altivec_vcmpequw_p:
9593 case Intrinsic::ppc_altivec_vcmpequd_p:
9600 case Intrinsic::ppc_altivec_vcmpneb_p:
9601 case Intrinsic::ppc_altivec_vcmpneh_p:
9602 case Intrinsic::ppc_altivec_vcmpnew_p:
9603 case Intrinsic::ppc_altivec_vcmpnezb_p:
9604 case Intrinsic::ppc_altivec_vcmpnezh_p:
9605 case Intrinsic::ppc_altivec_vcmpnezw_p:
9607 switch (IntrinsicID) {
9610 case Intrinsic::ppc_altivec_vcmpneb_p:
9613 case Intrinsic::ppc_altivec_vcmpneh_p:
9616 case Intrinsic::ppc_altivec_vcmpnew_p:
9619 case Intrinsic::ppc_altivec_vcmpnezb_p:
9622 case Intrinsic::ppc_altivec_vcmpnezh_p:
9625 case Intrinsic::ppc_altivec_vcmpnezw_p:
9633 case Intrinsic::ppc_altivec_vcmpgefp_p:
9637 case Intrinsic::ppc_altivec_vcmpgtfp_p:
9641 case Intrinsic::ppc_altivec_vcmpgtsb_p:
9645 case Intrinsic::ppc_altivec_vcmpgtsh_p:
9649 case Intrinsic::ppc_altivec_vcmpgtsw_p:
9653 case Intrinsic::ppc_altivec_vcmpgtsd_p:
9660 case Intrinsic::ppc_altivec_vcmpgtub_p:
9664 case Intrinsic::ppc_altivec_vcmpgtuh_p:
9668 case Intrinsic::ppc_altivec_vcmpgtuw_p:
9672 case Intrinsic::ppc_altivec_vcmpgtud_p:
9680 case Intrinsic::ppc_altivec_vcmpequq:
9681 case Intrinsic::ppc_altivec_vcmpgtsq:
9682 case Intrinsic::ppc_altivec_vcmpgtuq:
9685 switch (IntrinsicID) {
9688 case Intrinsic::ppc_altivec_vcmpequq:
9691 case Intrinsic::ppc_altivec_vcmpgtsq:
9694 case Intrinsic::ppc_altivec_vcmpgtuq:
9701 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9702 case Intrinsic::ppc_vsx_xvcmpgedp_p:
9703 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9704 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9705 case Intrinsic::ppc_vsx_xvcmpgesp_p:
9706 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9707 if (Subtarget.
hasVSX()) {
9708 switch (IntrinsicID) {
9709 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9712 case Intrinsic::ppc_vsx_xvcmpgedp_p:
9715 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9718 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9721 case Intrinsic::ppc_vsx_xvcmpgesp_p:
9724 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9734 case Intrinsic::ppc_altivec_vcmpbfp:
9737 case Intrinsic::ppc_altivec_vcmpeqfp:
9740 case Intrinsic::ppc_altivec_vcmpequb:
9743 case Intrinsic::ppc_altivec_vcmpequh:
9746 case Intrinsic::ppc_altivec_vcmpequw:
9749 case Intrinsic::ppc_altivec_vcmpequd:
9755 case Intrinsic::ppc_altivec_vcmpneb:
9756 case Intrinsic::ppc_altivec_vcmpneh:
9757 case Intrinsic::ppc_altivec_vcmpnew:
9758 case Intrinsic::ppc_altivec_vcmpnezb:
9759 case Intrinsic::ppc_altivec_vcmpnezh:
9760 case Intrinsic::ppc_altivec_vcmpnezw:
9762 switch (IntrinsicID) {
9765 case Intrinsic::ppc_altivec_vcmpneb:
9768 case Intrinsic::ppc_altivec_vcmpneh:
9771 case Intrinsic::ppc_altivec_vcmpnew:
9774 case Intrinsic::ppc_altivec_vcmpnezb:
9777 case Intrinsic::ppc_altivec_vcmpnezh:
9780 case Intrinsic::ppc_altivec_vcmpnezw:
9787 case Intrinsic::ppc_altivec_vcmpgefp:
9790 case Intrinsic::ppc_altivec_vcmpgtfp:
9793 case Intrinsic::ppc_altivec_vcmpgtsb:
9796 case Intrinsic::ppc_altivec_vcmpgtsh:
9799 case Intrinsic::ppc_altivec_vcmpgtsw:
9802 case Intrinsic::ppc_altivec_vcmpgtsd:
9808 case Intrinsic::ppc_altivec_vcmpgtub:
9811 case Intrinsic::ppc_altivec_vcmpgtuh:
9814 case Intrinsic::ppc_altivec_vcmpgtuw:
9817 case Intrinsic::ppc_altivec_vcmpgtud:
9823 case Intrinsic::ppc_altivec_vcmpequq_p:
9824 case Intrinsic::ppc_altivec_vcmpgtsq_p:
9825 case Intrinsic::ppc_altivec_vcmpgtuq_p:
9828 switch (IntrinsicID) {
9831 case Intrinsic::ppc_altivec_vcmpequq_p:
9834 case Intrinsic::ppc_altivec_vcmpgtsq_p:
9837 case Intrinsic::ppc_altivec_vcmpgtuq_p:
9851 unsigned IntrinsicID =
9856 switch (IntrinsicID) {
9857 case Intrinsic::thread_pointer:
9863 case Intrinsic::ppc_mma_disassemble_acc:
9864 case Intrinsic::ppc_vsx_disassemble_pair: {
9867 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
9878 RetOps.push_back(Extract);
9894 Op.getOperand(1),
Op.getOperand(2),
9954 case Intrinsic::ppc_cfence: {
9955 assert(
ArgStart == 1 &&
"llvm.ppc.cfence must carry a chain argument.");
9956 assert(Subtarget.
isPPC64() &&
"Only 64-bit is supported for now.");
9978 int VectorIndex = 0;
9991 "Expecting an atomic compare-and-swap here.");
9995 if (
MemVT.getSizeInBits() >= 32)
10041 "Should only be called for ISD::INSERT_VECTOR_ELT");
10048 EVT VT =
Op.getValueType();
10073 EVT VT =
Op.getValueType();
10082 "Type unsupported without MMA");
10084 "Type unsupported without paired vector support");
10085 Align Alignment =
LN->getAlign();
10092 LN->getPointerInfo().getWithOffset(
Idx * 16),
10094 LN->getMemOperand()->getFlags(),
LN->getAAInfo());
10097 Loads.push_back(Load);
10101 std::reverse(Loads.begin(), Loads.end());
10128 "Type unsupported without MMA");
10130 "Type unsupported without paired vector support");
10131 Align Alignment =
SN->getAlign();
10144 SN->getPointerInfo().getWithOffset(
Idx * 16),
10146 SN->getMemOperand()->getFlags(),
SN->getAAInfo());
10149 Stores.push_back(Store);
10158 SDValue LHS =
Op.getOperand(0), RHS =
Op.getOperand(1);
10183 SDValue LHS =
Op.getOperand(0), RHS =
Op.getOperand(1);
10201 for (
unsigned i = 0;
i != 8; ++
i) {
10202 if (isLittleEndian) {
10204 Ops[
i*2+1] = 2*
i+16;
10207 Ops[
i*2+1] = 2*
i+1+16;
10210 if (isLittleEndian)
10222 !Subtarget.hasP9Vector())
10232 "Should only be called for ISD::FP_EXTEND");
10249 "Node should have 2 operands with second one being a constant!");
10284 LD->getMemoryVT(),
LD->getMemOperand());
10297 LD->getMemoryVT(),
LD->getMemOperand());
10308 switch (Op.getOpcode()) {
10315 case ISD::SETCC:
return LowerSETCC(Op, DAG);
10321 case ISD::VAARG:
return LowerVAARG(Op, DAG);
10327 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10334 case ISD::LOAD:
return LowerLOAD(Op, DAG);
10335 case ISD::STORE:
return LowerSTORE(Op, DAG);
10353 case ISD::FSHL:
return LowerFunnelShift(Op, DAG);
10354 case ISD::FSHR:
return LowerFunnelShift(Op, DAG);
10362 case ISD::MUL:
return LowerMUL(Op, DAG);
10366 return LowerFP_ROUND(Op, DAG);
10367 case ISD::ROTL:
return LowerROTL(Op, DAG);
10379 return LowerINTRINSIC_VOID(Op, DAG);
10381 return LowerBSWAP(Op, DAG);
10383 return LowerATOMIC_CMP_SWAP(Op, DAG);
10391 switch (
N->getOpcode()) {
10393 llvm_unreachable(
"Do not know how to custom type legalize this operation!");
10405 Intrinsic::loop_decrement)
10409 "Unexpected result type for CTR decrement intrinsic");
10411 N->getValueType(0));
10424 EVT VT =
N->getValueType(0);
10439 if (
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).
getValueType() ==
10445 if (!
N->getValueType(0).isVector())
10472 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10474 return Builder.CreateCall(Func, {});
10497 return Builder.CreateCall(
10499 Builder.GetInsertBlock()->getParent()->getParent(),
10500 Intrinsic::ppc_cfence, {Inst->getType()}),
10562 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10566 RegInfo.createVirtualRegister(
AtomicSize == 8 ? &PPC::G8RCRegClass
10567 : &PPC::GPRCRegClass);
10599 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10628 switch(
MI.getOpcode()) {
10632 return TII->isSignExtended(
MI);
10656 case PPC::EXTSB8_32_64:
10657 case PPC::EXTSB8_rec:
10658 case PPC::EXTSB_rec:
10661 case PPC::EXTSH8_32_64:
10662 case PPC::EXTSH8_rec:
10663 case PPC::EXTSH_rec:
10665 case PPC::EXTSWSLI:
10666 case PPC::EXTSWSLI_32_64:
10667 case PPC::EXTSWSLI_32_64_rec:
10668 case PPC::EXTSWSLI_rec:
10669 case PPC::EXTSW_32:
10670 case PPC::EXTSW_32_64:
10671 case PPC::EXTSW_32_64_rec:
10672 case PPC::EXTSW_rec:
10675 case PPC::SRAWI_rec:
10676 case PPC::SRAW_rec:
10701 .
addReg(
MI.getOperand(3).getReg());
10713 bool is64bit = Subtarget.
isPPC64();
10715 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10734 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10737 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10743 isLittleEndian ?
Shift1Reg : RegInfo.createVirtualRegister(
GPRC);
10781 Ptr1Reg = RegInfo.createVirtualRegister(RC);
10795 if (!isLittleEndian)
10796 BuildMI(BB, dl,
TII->get(PPC::XORI), ShiftReg)
10900 Register DstReg =
MI.getOperand(0).getReg();
10908 "Invalid Pointer Size!");
10936 sinkMBB->transferSuccessorsAndUpdatePHIs(
MBB);
10972 BaseReg = Subtarget.
isPPC64() ? PPC::X1 : PPC::R1;
10974 BaseReg = Subtarget.
isPPC64() ? PPC::BP8 : PPC::BP;
10977 TII->get(Subtarget.
isPPC64() ? PPC::STD : PPC::STW))
11021 TII->get(PPC::PHI), DstReg)
11025 MI.eraseFromParent();
11040 "Invalid Pointer Size!");
11043 (
PVT ==
MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11047 unsigned SP = (
PVT ==
MVT::i64) ? PPC::X1 : PPC::R1;
11057 const int64_t SPOffset = 2 *
PVT.getStoreSize();
11127 MI.eraseFromParent();
11143 "Unexpected stack alignment");
11146 unsigned StackProbeSize = 4096;
11153 StackProbeSize &= ~(StackAlign - 1);
11154 return StackProbeSize ? StackProbeSize : StackAlign;
11166 const bool isPPC64 = Subtarget.
isPPC64();
11205 Register DstReg =
MI.getOperand(0).getReg();
11207 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11218 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11224 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11225 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11229 .
add(
MI.getOperand(2))
11230 .
add(
MI.getOperand(3));
11302 MRI.createVirtualRegister(isPPC64 ?
G8RC :
GPRC);
11304 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11306 .
add(
MI.getOperand(2))
11307 .add(
MI.getOperand(3));
11315 TailMBB->transferSuccessorsAndUpdatePHIs(
MBB);
11319 MI.eraseFromParent();
11328 if (
MI.getOpcode() == TargetOpcode::STACKMAP ||
11329 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11331 MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11344 if (
MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11345 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11347 }
else if (
MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11348 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11361 if (
MI.getOpcode() == PPC::SELECT_CC_I4 ||
11362 MI.getOpcode() == PPC::SELECT_CC_I8 ||
MI.getOpcode() == PPC::SELECT_I4 ||
11363 MI.getOpcode() == PPC::SELECT_I8) {
11365 if (
MI.getOpcode() == PPC::SELECT_CC_I4 ||
11366 MI.getOpcode() == PPC::SELECT_CC_I8)
11367 Cond.push_back(
MI.getOperand(4));
11370 Cond.push_back(
MI.getOperand(1));
11373 TII->insertSelect(*BB,
MI, dl,
MI.getOperand(0).getReg(),
Cond,
11374 MI.getOperand(2).getReg(),
MI.getOperand(3).getReg());
11375 }
else if (
MI.getOpcode() == PPC::SELECT_CC_F4 ||
11376 MI.getOpcode() == PPC::SELECT_CC_F8 ||
11377 MI.getOpcode() == PPC::SELECT_CC_F16 ||
11378 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11379 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11380 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11381 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11382 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11383 MI.getOpcode() == PPC::SELECT_CC_SPE ||
11384 MI.getOpcode() == PPC::SELECT_F4 ||
11385 MI.getOpcode() == PPC::SELECT_F8 ||
11386 MI.getOpcode() == PPC::SELECT_F16 ||
11387 MI.getOpcode() == PPC::SELECT_SPE ||
11388 MI.getOpcode() == PPC::SELECT_SPE4 ||
11389 MI.getOpcode() == PPC::SELECT_VRRC ||
11390 MI.getOpcode() == PPC::SELECT_VSFRC ||
11391 MI.getOpcode() == PPC::SELECT_VSSRC ||
11392 MI.getOpcode() == PPC::SELECT_VSRC) {
11413 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11419 if (
MI.getOpcode() == PPC::SELECT_I4 ||
MI.getOpcode() == PPC::SELECT_I8 ||
11420 MI.getOpcode() == PPC::SELECT_F4 ||
MI.getOpcode() == PPC::SELECT_F8 ||
11421 MI.getOpcode() == PPC::SELECT_F16 ||
11422 MI.getOpcode() == PPC::SELECT_SPE4 ||
11423 MI.getOpcode() == PPC::SELECT_SPE ||
11424 MI.getOpcode() == PPC::SELECT_VRRC ||
11425 MI.getOpcode() == PPC::SELECT_VSFRC ||
11426 MI.getOpcode() == PPC::SELECT_VSSRC ||
11427 MI.getOpcode() == PPC::SELECT_VSRC) {
11429 .
addReg(
MI.getOperand(1).getReg())
11435 .
addReg(
MI.getOperand(1).getReg())
11452 .
addReg(
MI.getOperand(3).getReg())
11454 .
addReg(
MI.getOperand(2).getReg())
11456 }
else if (
MI.getOpcode() == PPC::ReadTB) {
11478 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11492 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11504 }
else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11506 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11508 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11510 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11513 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11515 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11517 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11519 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11522 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11524 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11526 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11528 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11531 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11533 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11535 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11537 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11540 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11542 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11544 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11546 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11549 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11551 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11553 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11555 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11558 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11560 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11562 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11564 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11567 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11569 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11571 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11573 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11576 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11578 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11580 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11582 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11585 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11587 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11589 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11591 else if (
MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11594 else if (
MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11596 else if (
MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11598 else if (
MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11600 else if (
MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11602 else if (
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11603 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11605 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11607 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11608 bool is64bit =
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11612 switch (
MI.getOpcode()) {
11615 case PPC::ATOMIC_CMP_SWAP_I8:
11620 case PPC::ATOMIC_CMP_SWAP_I16:
11625 case PPC::ATOMIC_CMP_SWAP_I32:
11629 case PPC::ATOMIC_CMP_SWAP_I64:
11651 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11671 BuildMI(BB, dl,
TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11704 }
else if (
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11705 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11709 bool is64bit = Subtarget.
isPPC64();
11711 bool is8bit =
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11730 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11734 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11740 isLittleEndian ?
Shift1Reg : RegInfo.createVirtualRegister(
GPRC);
11752 Register TmpReg = RegInfo.createVirtualRegister(
GPRC);
11787 Ptr1Reg = RegInfo.createVirtualRegister(RC);
11802 if (!isLittleEndian)
11803 BuildMI(BB, dl,
TII->get(PPC::XORI), ShiftReg)
11848 BuildMI(BB, dl,
TII->get(PPC::CMPW), PPC::CR0)
11890 }
else if (
MI.getOpcode() == PPC::FADDrtz) {
11900 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11915 auto MIB =
BuildMI(*BB,
MI, dl,
TII->get(PPC::FADD), Dest)
11923 }
else if (
MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11924 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11925 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11926 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11927 unsigned Opcode = (
MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11928 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11931 bool IsEQ = (
MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11932 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11935 Register Dest = RegInfo.createVirtualRegister(
11936 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11940 .
addReg(
MI.getOperand(1).getReg())
11943 MI.getOperand(0).getReg())
11945 }
else if (
MI.getOpcode() == PPC::TCHECK_RET) {
11948 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11951 MI.getOperand(0).getReg())
11953 }
else if (
MI.getOpcode() == PPC::TBEGIN_RET) {
11955 unsigned Imm =
MI.getOperand(1).getImm();
11958 MI.getOperand(0).getReg())
11960 }
else if (
MI.getOpcode() == PPC::SETRNDi) {
11976 unsigned Mode =
MI.getOperand(1).getImm();
11977 BuildMI(*BB,
MI, dl,
TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11981 BuildMI(*BB,
MI, dl,
TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11984 }
else if (
MI.getOpcode() == PPC::SETRND) {
11994 BuildMI(*BB,
MI, dl,
TII->get(TargetOpcode::COPY), DestReg)
12001 if (RC == &PPC::F8RCRegClass) {
12003 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12004 "Unsupported RegClass.");
12010 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12011 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12012 "Unsupported RegClass.");
12093 }
else if (
MI.getOpcode() == PPC::SETFLM) {
12107 }
else if (
MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12108 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12114 MI.eraseFromParent();
12136 EVT VT =
Op.getValueType();
12163PPCTargetLowering::getSqrtResultForDenormInput(
SDValue Op,
12166 EVT VT =
Op.getValueType();
12209unsigned PPCTargetLowering::combineRepeatedFPDivisors()
const {
12247 unsigned Bytes,
int Dist,
12261 if (FS !=
BFS || FS != (
int)Bytes)
return false;
12287 unsigned Bytes,
int Dist,
12290 EVT VT = LS->getMemoryVT();
12291 SDValue Loc = LS->getBasePtr();
12298 default:
return false;
12299 case Intrinsic::ppc_altivec_lvx:
12300 case Intrinsic::ppc_altivec_lvxl:
12301 case Intrinsic::ppc_vsx_lxvw4x:
12302 case Intrinsic::ppc_vsx_lxvw4x_be:
12305 case Intrinsic::ppc_vsx_lxvd2x:
12306 case Intrinsic::ppc_vsx_lxvd2x_be:
12309 case Intrinsic::ppc_altivec_lvebx:
12312 case Intrinsic::ppc_altivec_lvehx:
12315 case Intrinsic::ppc_altivec_lvewx:
12326 default:
return false;
12327 case Intrinsic::ppc_altivec_stvx:
12328 case Intrinsic::ppc_altivec_stvxl:
12329 case Intrinsic::ppc_vsx_stxvw4x:
12332 case Intrinsic::ppc_vsx_stxvd2x:
12335 case Intrinsic::ppc_vsx_stxvw4x_be:
12338 case Intrinsic::ppc_vsx_stxvd2x_be:
12341 case Intrinsic::ppc_altivec_stvebx:
12344 case Intrinsic::ppc_altivec_stvehx:
12347 case Intrinsic::ppc_altivec_stvewx:
12364 SDValue Chain = LD->getChain();
12365 EVT VT = LD->getMemoryVT();
12374 while (!Queue.empty()) {
12383 if (!Visited.count(
ChainLD->getChain().getNode()))
12384 Queue.push_back(
ChainLD->getChain().getNode());
12387 if (!Visited.count(O.getNode()))
12388 Queue.push_back(O.getNode());
12403 Queue.push_back(*
I);
12405 while (!Queue.empty()) {
12407 if (!Visited.insert(
LoadRoot).second)
12415 UE =
LoadRoot->use_end(); UI != UE; ++UI)
12419 Queue.push_back(*UI);
12463 DAGCombinerInfo &
DCI)
const {
12471 if (!
DCI.isAfterLegalizeDAG())
12477 UE =
N->
use_end(); UI != UE; ++UI) {
12483 auto OpSize =
N->getOperand(0).getValueSizeInBits();
12505 DAGCombinerInfo &
DCI)
const {
12523 if (
N->getOperand(0).getValueType() !=
MVT::i32 &&
12524 N->getOperand(0).getValueType() !=
MVT::i64)
12534 unsigned OpBits =
N->getOperand(0).getValueSizeInBits();
12545 return (
N->getOpcode() ==
ISD::SETCC ? ConvertSETCCToSubtract(
N,
DCI)
12568 if (
N->getOperand(0).getOpcode() !=
ISD::AND &&
12569 N->getOperand(0).getOpcode() !=
ISD::OR &&
12570 N->getOperand(0).getOpcode() !=
ISD::XOR &&
12580 N->getOperand(1).getOpcode() !=
ISD::AND &&
12581 N->getOperand(1).getOpcode() !=
ISD::OR &&
12582 N->getOperand(1).getOpcode() !=
ISD::XOR &&
12595 for (
unsigned i = 0;
i < 2; ++
i) {
12599 N->getOperand(
i).getOperand(0).getValueType() ==
MVT::i1) ||
12601 Inputs.push_back(
N->getOperand(
i));
12603 BinOps.push_back(
N->getOperand(
i));
12611 while (!
BinOps.empty()) {
12614 if (!Visited.insert(BinOp.
getNode()).second)
12653 for (
unsigned i = 0,
ie = Inputs.size();
i !=
ie; ++
i) {
12658 UE = Inputs[
i].getNode()->use_end();
12682 UE =
PromOps[
i].getNode()->use_end();
12705 for (
unsigned i = 0,
ie = Inputs.size();
i !=
ie; ++
i) {
12747 switch (
PromOp.getOpcode()) {
12748 default:
C = 0;
break;
12766 PromOp.getNode()->op_end());
12769 for (
unsigned i = 0;
i < 2; ++
i)
12779 return N->getOperand(0);
12787 DAGCombinerInfo &
DCI)
const {
12813 if (
N->getOperand(0).getOpcode() !=
ISD::AND &&
12814 N->getOperand(0).getOpcode() !=
ISD::OR &&
12815 N->getOperand(0).getOpcode() !=
ISD::XOR &&
12826 while (!
BinOps.empty()) {
12829 if (!Visited.insert(BinOp.
getNode()).second)
12865 for (
unsigned i = 0,
ie = Inputs.size();
i !=
ie; ++
i) {
12870 UE = Inputs[
i].getNode()->use_end();
12895 UE =
PromOps[
i].getNode()->use_end();
12918 unsigned PromBits =
N->getOperand(0).getValueSizeInBits();
12923 for (
unsigned i = 0,
ie = Inputs.size();
i !=
ie; ++
i) {
12928 Inputs[
i].getOperand(0).getValueSizeInBits();
12946 for (
unsigned i = 0,
ie = Inputs.size();
i !=
ie; ++
i) {
12980 switch (
PromOp.getOpcode()) {
12981 default:
C = 0;
break;
12987 PromOp.getOperand(
C).getValueType() !=
N->getValueType(0)) ||
12989 PromOp.getOperand(
C+1).getValueType() !=
N->getValueType(0))) {
13003 PromOp.getOperand(0).getValueType() !=
N->getValueType(0)) ||
13005 PromOp.getOperand(1).getValueType() !=
N->getValueType(0))) {
13012 PromOp.getNode()->op_end());
13015 for (
unsigned i = 0;
i < 2; ++
i) {
13047 return N->getOperand(0);
13055 dl,
N->getValueType(0)));
13058 "Invalid extension type");
13069 DAGCombinerInfo &
DCI)
const {
13071 "Should be called with a SETCC node");
13089 EVT VT =
N->getValueType(0);
13090 EVT OpVT = LHS.getValueType();
13096 return DAGCombineTruncBoolExt(
N,
DCI);
13115combineElementTruncationToVectorTruncation(
SDNode *
N,
13116 DAGCombinerInfo &
DCI)
const {
13118 "Should be called with a BUILD_VECTOR node");
13125 "The input operand must be an fp-to-int conversion.");
13134 bool IsSplat =
true;
13140 for (
int i = 0, e =
N->getNumOperands();
i <
e; ++
i) {
13164 for (
int i = 0, e =
N->getNumOperands();
i <
e; ++
i) {
13175 Ops.push_back(Trunc);
13203 "Should be called with a BUILD_VECTOR node");
13208 if (!
N->getValueType(0).getVectorElementType().isByteSized())
13213 unsigned ElemSize =
N->getValueType(0).getScalarType().getStoreSize();
13224 N->getNumOperands() == 1)
13227 for (
int i = 1, e =
N->getNumOperands();
i < e; ++
i) {
13257 "The loads cannot be both consecutive and reverse consecutive.");
13263 N->getOperand(
N->getNumOperands()-1);
13268 assert(
LD1 &&
"Input needs to be a LoadSDNode.");
13269 return DAG.
getLoad(
N->getValueType(0), dl,
LD1->getChain(),
13270 LD1->getBasePtr(),
LD1->getPointerInfo(),
13271 LD1->getAlignment());
13274 assert(LDL &&
"Input needs to be a LoadSDNode.");
13276 LDL->getBasePtr(), LDL->getPointerInfo(),
13277 LDL->getAlignment());
13279 for (
int i =
N->getNumOperands() - 1;
i >= 0;
i--)
13283 DAG.
getUNDEF(
N->getValueType(0)), Ops);
13296 unsigned NumElems = Input.getValueType().getVectorNumElements();
13302 for (
unsigned i = 0;
i <
N->getNumOperands();
i++) {
13313 DAG.
getUNDEF(Input.getValueType()), ShuffleMask);
13315 EVT VT =
N->getValueType(0);
13319 Input.getValueType().getVectorElementType(),
13349 uint64_t
Elems = 0;
13362 SDValue Extract = Op.getOperand(0);
13372 Index =
ExtOp->getZExtValue();
13373 if (Input && Input != Extract.
getOperand(0))
13388 for (
unsigned i = 0;
i <
N->getNumOperands();
i++) {
13397 int InputSize = Input.getValueType().getScalarSizeInBits();
13398 int OutputSize =
N->getValueType(0).getScalarSizeInBits();
13442 EVT MemoryType = LD->getMemoryVT();
13456 LD->getChain(), LD->getBasePtr(),
13461 LoadOps, MemoryType, LD->getMemOperand());
13465 DAGCombinerInfo &
DCI)
const {
13467 "Should be called with a BUILD_VECTOR node");
13472 if (!Subtarget.
hasVSX())
13521 if (
FirstInput.getOpcode() !=
N->getOperand(1).getOpcode())
13535 Ext1.getOperand(0) !=
Ext2.getOperand(0))
13556 DAGCombinerInfo &
DCI)
const {
13559 "Need an int -> FP conversion node here");
13572 if (!
Op.getOperand(0).getValueType().isSimple())
13574 if (
Op.getOperand(0).getValueType().getSimpleVT() <=
MVT(
MVT::i1) ||
13575 Op.getOperand(0).getValueType().getSimpleVT() >
MVT(
MVT::i64))
13611 if (
Op.getOperand(0).getValueType() ==
MVT::i32)
13615 "UINT_TO_FP is supported only with FPCVT");
13633 SDValue Src =
Op.getOperand(0).getOperand(0);
13652 DCI.AddToWorklist(
FP.getNode());
13671 switch (
N->getOpcode()) {
13676 Chain = LD->getChain();
13677 Base = LD->getBasePtr();
13678 MMO = LD->getMemOperand();
13688 Chain =
Intrin->getChain();
13691 Base =
Intrin->getOperand(2);
13692 MMO =
Intrin->getMemOperand();
13697 MVT VecTy =
N->getValueType(0).getSimpleVT();
13711 DCI.AddToWorklist(Load.getNode());
13712 Chain = Load.getValue(1);
13720 DCI.AddToWorklist(
N.getNode());
13740 switch (
N->getOpcode()) {
13745 Chain = ST->getChain();
13746 Base = ST->getBasePtr();
13747 MMO = ST->getMemOperand();
13758 Chain =
Intrin->getChain();
13760 Base =
Intrin->getOperand(3);
13761 MMO =
Intrin->getMemOperand();
13791 DCI.AddToWorklist(Store.getNode());
13797 DAGCombinerInfo &
DCI)
const {
13801 unsigned Opcode =
N->getOperand(1).getOpcode();
13804 &&
"Not a FP_TO_INT Instruction!");
13807 EVT Op1VT =
N->getOperand(1).getValueType();
13823 if (
ResVT.getScalarSizeInBits() == 32) {
13831 PPCISD::FP_TO_UINT_IN_VSR;
13838 unsigned ByteSize =
Op1VT.getScalarSizeInBits() / 8;
13856 for (
int i = 1, e = Mask.size();
i < e;
i++) {
13872 for (
int i = 0, e = Op.getNumOperands();
i < e;
i++) {
13873 FirstOp = Op.getOperand(
i);
13879 for (
int i = 1, e = Op.getNumOperands();
i < e;
i++)
13880 if (Op.getOperand(
i) != FirstOp && !Op.getOperand(
i).isUndef())
13890 Op = Op.getOperand(0);
13899 for (
int i = 0, e =
ShuffV.size();
i < e;
i++) {
13915 "Expecting a SCALAR_TO_VECTOR here");
13947 int NumElts = LHS.getValueType().getVectorNumElements();
13974 :
SToVRHS.getValueType().getVectorNumElements();
13982 int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
13990 if (
SToVLHS.getValueType() != LHS.getValueType())
13998 if (
SToVRHS.getValueType() != RHS.getValueType())
14038 for (
int i = 1, e =
Mask.size();
i <
e;
i += 2)
14043 for (
int i = 0, e =
Mask.size();
i <
e;
i += 2)
14057 DAGCombinerInfo &
DCI)
const {
14059 "Not a reverse memop pattern!");
14064 auto I =
Mask.rbegin();
14065 auto E =
Mask.rend();
14067 for (;
I !=
E; ++
I) {
14076 EVT VT =
SVN->getValueType(0);
14114 switch (
N->getOpcode()) {
14117 return combineADD(
N,
DCI);
14119 return combineSHL(
N,
DCI);
14121 return combineSRA(
N,
DCI);
14123 return combineSRL(
N,
DCI);
14125 return combineMUL(
N,
DCI);
14128 return combineFMALike(
N,
DCI);
14131 return N->getOperand(0);
14135 return N->getOperand(0);
14139 if (
C->isNullValue() ||
14140 C->isAllOnesValue())
14141 return N->getOperand(0);
14147 return DAGCombineExtBoolTrunc(
N,
DCI);
14149 return combineTRUNCATE(
N,
DCI);
14155 return DAGCombineTruncBoolExt(
N,
DCI);
14158 return combineFPToIntToFP(
N,
DCI);
14167 EVT Op1VT =
N->getOperand(1).getValueType();
14168 unsigned Opcode =
N->getOperand(1).getOpcode();
14185 N->getOperand(1).getNode()->hasOneUse() &&
14192 if (
mVT.isExtended() ||
mVT.getSizeInBits() < 16)
14203 int Shift =
Op1VT.getSizeInBits() -
mVT.getSizeInBits();
14222 if (Subtarget.
isPPC64() && !
DCI.isBeforeLegalize() &&
14227 MemVT.getSizeInBits());
14241 if (
Op1VT.isSimple()) {
14252 EVT VT = LD->getValueType(0);
14287 if (!LD->hasNUsesOfValue(2, 0))
14290 auto UI = LD->use_begin();
14291 while (UI.getUse().getResNo() != 0) ++UI;
14293 while (UI.getUse().getResNo() != 0) ++UI;
14294 SDNode *RightShift = *UI;
14302 if (RightShift->getOpcode() !=
ISD::SRL ||
14304 RightShift->getConstantOperandVal(1) != 32 ||
14305 !RightShift->hasOneUse())
14318 Bitcast->getValueType(0) !=
MVT::f32)
14330 SDValue BasePtr = LD->getBasePtr();
14331 if (LD->isIndexed()) {
14333 "Non-pre-inc AM on PPC?");
14342 LD->getPointerInfo(), LD->getAlignment(),
14343 MMOFlags, LD->getAAInfo());
14349 LD->getPointerInfo().getWithOffset(4),
14350 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14352 if (LD->isIndexed()) {
14372 if (LD->isUnindexed() && VT.
isVector() &&
14380 SDValue Chain = LD->getChain();
14381 SDValue Ptr = LD->getBasePtr();
14410 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14411 : Intrinsic::ppc_altivec_lvsl;
14412 IntrLD = Intrinsic::ppc_altivec_lvx;
14413 IntrPerm = Intrinsic::ppc_altivec_vperm;
14428 -(
long)
MemVT.getStoreSize()+1,
14429 2*
MemVT.getStoreSize()-1);
14463 1, 2*
MemVT.getStoreSize()-1);
14479 if (isLittleEndian)
14505 : Intrinsic::ppc_altivec_lvsl);
14506 if (IID ==
Intr &&
N->getOperand(1)->getOpcode() ==
ISD::ADD) {
14513 .zext(
Add.getScalarValueSizeInBits()))) {
14514 SDNode *BasePtr =
Add->getOperand(0).getNode();
14516 UE = BasePtr->use_end();
14531 SDNode *BasePtr =
Add->getOperand(0).getNode();
14533 UE = BasePtr->use_end(); UI != UE; ++UI) {
14534 if (UI->getOpcode() ==
ISD::ADD &&
14538 (1ULL << Bits) == 0) {
14555 (IID == Intrinsic::ppc_altivec_vmaxsw ||
14556 IID == Intrinsic::ppc_altivec_vmaxsh ||
14557 IID == Intrinsic::ppc_altivec_vmaxsb)) {
14563 V1.getSimpleValueType() == V2.getSimpleValueType()) {
14567 V1.getOperand(1) == V2) {
14573 V2.getOperand(1) ==
V1) {
14578 V1.getOperand(0) == V2.getOperand(1) &&
14579 V1.getOperand(1) == V2.getOperand(0)) {
14594 case Intrinsic::ppc_vsx_lxvw4x:
14595 case Intrinsic::ppc_vsx_lxvd2x:
14607 case Intrinsic::ppc_vsx_stxvw4x:
14608 case Intrinsic::ppc_vsx_stxvd2x:
14616 N->getOperand(0).hasOneUse() &&
14632 Ops, LD->getMemoryVT(), LD->getMemOperand());
14656 if (!
N->getOperand(0).hasOneUse() &&
14657 !
N->getOperand(1).hasOneUse() &&
14658 !
N->getOperand(2).hasOneUse()) {
14667 UI->getOperand(1) ==
N->getOperand(1) &&
14668 UI->getOperand(2) ==
N->getOperand(2) &&
14669 UI->getOperand(0) ==
N->getOperand(0)) {
14707 Intrinsic::loop_decrement) {
14713 "Counter decrement has more than one use");
14730 if (LHS.getOpcode() ==
ISD::AND &&
14733 Intrinsic::loop_decrement &&
14740 Intrinsic::loop_decrement &&
14743 "Counter decrement comparison is not EQ or NE");
14752 assert(LHS.getNode()->hasOneUse() &&
14753 "Counter decrement has more than one use");
14756 N->getOperand(0),
N->getOperand(4));
14765 assert(
isDot &&
"Can't compare against a vector result!");
14770 if (Val != 0 && Val != 1) {
14772 return N->getOperand(0);
14775 N->getOperand(0),
N->getOperand(4));
14786 EVT VTs[] = { LHS.getOperand(2).getValueType(),
MVT::Glue };
14815 return DAGCombineBuildVector(
N,
DCI);
14817 return combineABS(
N,
DCI);
14819 return combineVSelect(
N,
DCI);
14830 EVT VT =
N->getValueType(0);
14834 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
14840 bool IsNegPow2 = (-Divisor).isPowerOf2();
14845 Created.push_back(Op.getNode());
14849 Created.push_back(Op.getNode());
14863 unsigned Depth)
const {
14865 switch (Op.getOpcode()) {
14870 Known.
Zero = 0xFFFF0000;
14876 case Intrinsic::ppc_altivec_vcmpbfp_p:
14877 case Intrinsic::ppc_altivec_vcmpeqfp_p:
14878 case Intrinsic::ppc_altivec_vcmpequb_p:
14879 case Intrinsic::ppc_altivec_vcmpequh_p:
14880 case Intrinsic::ppc_altivec_vcmpequw_p:
14881 case Intrinsic::ppc_altivec_vcmpequd_p:
14882 case Intrinsic::ppc_altivec_vcmpequq_p:
14883 case Intrinsic::ppc_altivec_vcmpgefp_p:
14884 case Intrinsic::ppc_altivec_vcmpgtfp_p:
14885 case Intrinsic::ppc_altivec_vcmpgtsb_p:
14886 case Intrinsic::ppc_altivec_vcmpgtsh_p:
14887 case Intrinsic::ppc_altivec_vcmpgtsw_p:
14888 case Intrinsic::ppc_altivec_vcmpgtsd_p:
14889 case Intrinsic::ppc_altivec_vcmpgtsq_p:
14890 case Intrinsic::ppc_altivec_vcmpgtub_p:
14891 case Intrinsic::ppc_altivec_vcmpgtuh_p:
14892 case Intrinsic::ppc_altivec_vcmpgtuw_p:
14893 case Intrinsic::ppc_altivec_vcmpgtud_p:
14894 case Intrinsic::ppc_altivec_vcmpgtuq_p:
14934 for (
auto J = (*I)->begin(),
JE = (*I)->end(); J !=
JE; ++J) {
14954 if (Constraint.
size() == 1) {
14955 switch (Constraint[0]) {
14973 }
else if (Constraint ==
"wc") {
14975 }
else if (Constraint ==
"wa" || Constraint ==
"wd" ||
14976 Constraint ==
"wf" || Constraint ==
"ws" ||
14977 Constraint ==
"wi" || Constraint ==
"ww") {
14990 Value *CallOperandVal =
info.CallOperandVal;
14993 if (!CallOperandVal)
15042std::pair<unsigned, const TargetRegisterClass *>
15046 if (Constraint.
size() == 1) {
15048 switch (Constraint[0]) {
15051 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15052 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15055 return std::make_pair(0U, &PPC::G8RCRegClass);
15056 return std::make_pair(0U, &PPC::GPRCRegClass);
15062 if (Subtarget.
hasSPE()) {
15064 return std::make_pair(0U, &PPC::GPRCRegClass);
15066 return std::make_pair(0U, &PPC::SPERCRegClass);
15069 return std::make_pair(0U, &PPC::F4RCRegClass);
15071 return std::make_pair(0U, &PPC::F8RCRegClass);
15076 return std::make_pair(0U, &PPC::VRRCRegClass);
15079 return std::make_pair(0U, &PPC::CRRCRegClass);
15081 }
else if (Constraint ==
"wc" && Subtarget.
useCRBits()) {
15083 return std::make_pair(0U, &PPC::CRBITRCRegClass);
15084 }
else if ((Constraint ==
"wa" || Constraint ==
"wd" ||
15085 Constraint ==
"wf" || Constraint ==
"wi") &&
15087 return std::make_pair(0U, &PPC::VSRCRegClass);
15088 }
else if ((Constraint ==
"ws" || Constraint ==
"ww") && Subtarget.
hasVSX()) {
15090 return std::make_pair(0U, &PPC::VSSRCRegClass);
15092 return std::make_pair(0U, &PPC::VSFRCRegClass);
15098 if (Constraint.
size() > 3 && Constraint[1] ==
'v' && Constraint[2] ==
's') {
15101 "Attempted to access a vsr out of range");
15103 return std::make_pair(PPC::VSL0 +
VSNum, &PPC::VSRCRegClass);
15104 return std::make_pair(PPC::V0 +
VSNum - 32, &PPC::VSRCRegClass);
15106 std::pair<unsigned, const TargetRegisterClass *> R =
15116 PPC::GPRCRegClass.contains(R.first))
15117 return std::make_pair(
TRI->getMatchingSuperReg(R.first,
15118 PPC::sub_32, &PPC::G8RCRegClass),
15119 &PPC::G8RCRegClass);
15122 if (!R.second &&
StringRef(
"{cc}").equals_lower(Constraint)) {
15123 R.first = PPC::CR0;
15124 R.second = &PPC::CRRCRegClass;
15133 std::string &Constraint,
15134 std::vector<SDValue>&Ops,
15139 if (Constraint.length() > 1)
return;
15141 char Letter = Constraint[0];
15155 int64_t
Value =
CST->getSExtValue();
15197 if (Result.getNode()) {
15198 Ops.push_back(Result);
15230 switch (AM.
Scale) {
15267 bool isPPC64 = Subtarget.
isPPC64();
15302 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15304 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15318 bool isPPC64 = Subtarget.
isPPC64();
15325 .Case(
"r1",
is64Bit ? PPC::X1 : PPC::R1)
15326 .Case(
"r2", isPPC64 ?
Register() : PPC::R2)
15327 .Case(
"r13", (
is64Bit ? PPC::X13 : PPC::R13))
15370 unsigned Intrinsic)
const {
15371 switch (Intrinsic) {
15372 case Intrinsic::ppc_altivec_lvx:
15373 case Intrinsic::ppc_altivec_lvxl:
15374 case Intrinsic::ppc_altivec_lvebx:
15375 case Intrinsic::ppc_altivec_lvehx:
15376 case Intrinsic::ppc_altivec_lvewx:
15377 case Intrinsic::ppc_vsx_lxvd2x:
15378 case Intrinsic::ppc_vsx_lxvw4x:
15379 case Intrinsic::ppc_vsx_lxvd2x_be:
15380 case Intrinsic::ppc_vsx_lxvw4x_be:
15381 case Intrinsic::ppc_vsx_lxvl:
15382 case Intrinsic::ppc_vsx_lxvll: {
15384 switch (Intrinsic) {
15385 case Intrinsic::ppc_altivec_lvebx:
15388 case Intrinsic::ppc_altivec_lvehx:
15391 case Intrinsic::ppc_altivec_lvewx:
15394 case Intrinsic::ppc_vsx_lxvd2x:
15395 case Intrinsic::ppc_vsx_lxvd2x_be:
15405 Info.ptrVal =
I.getArgOperand(0);
15408 Info.align =
Align(1);
15412 case Intrinsic::ppc_altivec_stvx:
15413 case Intrinsic::ppc_altivec_stvxl:
15414 case Intrinsic::ppc_altivec_stvebx:
15415 case Intrinsic::ppc_altivec_stvehx:
15416 case Intrinsic::ppc_altivec_stvewx:
15417 case Intrinsic::ppc_vsx_stxvd2x:
15418 case Intrinsic::ppc_vsx_stxvw4x:
15419 case Intrinsic::ppc_vsx_stxvd2x_be:
15420 case Intrinsic::ppc_vsx_stxvw4x_be:
15421 case Intrinsic::ppc_vsx_stxvl:
15422 case Intrinsic::ppc_vsx_stxvll: {
15424 switch (Intrinsic) {
15425 case Intrinsic::ppc_altivec_stvebx:
15428 case Intrinsic::ppc_altivec_stvehx:
15431 case Intrinsic::ppc_altivec_stvewx:
15434 case Intrinsic::ppc_vsx_stxvd2x:
15435 case Intrinsic::ppc_vsx_stxvd2x_be:
15445 Info.ptrVal =
I.getArgOperand(1);
15448 Info.align =
Align(1);
15466 if (Subtarget.
hasAltivec() && Op.size() >= 16 &&
15467 (Op.isAligned(
Align(16)) ||
15483 assert(Ty->isIntegerTy());
15485 unsigned BitSize = Ty->getPrimitiveSizeInBits();
15486 return !(BitSize == 0 || BitSize > 64);
15490 if (!
Ty1->isIntegerTy() || !
Ty2->isIntegerTy())
15492 unsigned NumBits1 =
Ty1->getPrimitiveSizeInBits();
15493 unsigned NumBits2 =
Ty2->getPrimitiveSizeInBits();
15498 if (!
VT1.isInteger() || !
VT2.isInteger())
15527 "invalid fpext types");
15546 bool *
Fast)
const {
15564 if (Subtarget.
hasVSX()) {
15588 if (!
ConstNode->getAPIntValue().isSignedIntN(64))
15596 int64_t Imm =
ConstNode->getSExtValue();
15601 uint64_t
UImm =
static_cast<uint64_t
>(Imm);
15617 switch (Ty->getScalarType()->getTypeID()) {
15630 if (!
I->hasOneUse())
15634 assert(
User &&
"A single use instruction with no uses.");
15636 switch (
I->getOpcode()) {
15637 case Instruction::FMul: {
15639 if (
User->getOpcode() != Instruction::FSub &&
15640 User->getOpcode() != Instruction::FAdd)
15653 case Instruction::Load: {
15666 if (
User->getOpcode() != Instruction::Store)
15687 PPC::X12, PPC::LR8, PPC::CTR8, 0
15694 const Constant *PersonalityFn)
const {
15695 return Subtarget.
isPPC64() ? PPC::X3 : PPC::R3;
15699 const Constant *PersonalityFn)
const {
15700 return Subtarget.
isPPC64() ? PPC::X4 : PPC::R4;
15705 EVT VT ,
unsigned DefinedValues)
const {
15743 bool LegalOps,
bool OptForSize,
15745 unsigned Depth)
const {
15749 unsigned Opc = Op.getOpcode();
15750 EVT VT = Op.getValueType();
15759 SDValue N0 = Op.getOperand(0);
15761 SDValue N2 = Op.getOperand(2);
15788 }
else if (
NegN1) {
15821 bool ForCodeSize)
const {
15852 unsigned Opcode =
N->getOpcode();
15853 unsigned TargetOpcode;
15873 return DAG.
getNode(TargetOpcode,
SDLoc(
N), VT, N0,
N1->getOperand(0));
15939 SDValue Cmp = Op.getOperand(0);
15940 if (Cmp.getOpcode() !=
ISD::SETCC || !Cmp.hasOneUse() ||
15941 Cmp.getOperand(0).getValueType() !=
MVT::i64)
15965 SDValue Cmp = RHS.getOperand(0);
16075 DAGCombinerInfo &
DCI)
const {
16088 EVT VT =
N->getValueType(0);
16128 return DCI.DAG.getNode(
16174 EVT VT =
N->getValueType(0);
16198 }
else if ((
MulAmtAbs + 1).isPowerOf2()) {
16223 DAGCombinerInfo &
DCI)
const {
16228 EVT VT =
N->getValueType(0);
16231 unsigned Opc =
N->getOpcode();
16233 bool LegalOps = !
DCI.isBeforeLegalizeOps();
16257bool PPCTargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
16287bool PPCTargetLowering::hasBitPreservingFPLogic(
EVT VT)
const {
16288 if (!Subtarget.
hasVSX())
16296bool PPCTargetLowering::
16302 if (CI->getBitWidth() > 64)
16304 int64_t ConstVal = CI->getZExtValue();
16306 (
isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16321 "Only combine this when P9 altivec supported!");
16322 EVT VT =
N->getValueType(0);
16328 if (
N->getOperand(0).getOpcode() ==
ISD::SUB) {
16331 unsigned SubOpcd0 =
N->getOperand(0)->getOperand(0).getOpcode();
16332 unsigned SubOpcd1 =
N->getOperand(0)->getOperand(1).getOpcode();
16338 N->getOperand(0)->getOperand(0),
16339 N->getOperand(0)->getOperand(1),
16344 if (
N->getOperand(0).getValueType() ==
MVT::v4i32 &&
16345 N->getOperand(0).hasOneUse()) {
16347 N->getOperand(0)->getOperand(0),
16348 N->getOperand(0)->getOperand(1),
16362 DAGCombinerInfo &
DCI)
const {
16365 "Only combine this when P9 altivec supported!");
16372 EVT VT =
N->getOperand(1).getValueType();
unsigned const MachineRegisterInfo * MRI
if(Register::isVirtualRegister(Reg)) return MRI -> getRegClass(Reg) ->hasSuperClassEq(&AArch64::GPR64RegClass)
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
static const unsigned PerfectShuffleTable[6561+1]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static std::pair< Register, unsigned > getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isLoad(int Opcode)
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
Function Alias Analysis Results
Atomic ordering constants.
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
unsigned const TargetRegisterInfo * TRI
Promote Memory to Register
static bool isConstantOrUndef(const SDValue Op)
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
cl::opt< bool > ANDIGlueBug("expose-ppc-andi-glue-bug", cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden)
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getCanonicalConstSplat - Build a canonical splat immediate of Val with an element size of SplatSize.
static bool needStackSlotPassParameters(const PPCSubtarget &Subtarget, const SmallVectorImpl< ISD::OutputArg > &Outs)
static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl< int > &ShuffV, int LHSMaxIdx, int RHSMinIdx, int RHSMaxIdx, int HalfVec)
static bool isAlternatingShuffMask(const ArrayRef< int > &Mask, int NumElts)
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, SDValue Input, uint64_t Elems, uint64_t CorrectElems)
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, bool Swap, SDLoc &DL, SelectionDAG &DAG)
This function is called when we have proved that a SETCC node can be replaced by subtraction (and oth...
static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL)
static bool callsShareTOCBase(const Function *Caller, SDValue Callee, const TargetMachine &TM)
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget)
static bool isFunctionGlobalAddress(SDValue Callee)
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
static cl::opt< bool > EnableSoftFP128("enable-soft-fp128", cl::desc("temp option to enable soft fp128"), cl::Hidden)
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls.
static bool areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, CallingConv::ID CalleeCC)
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isCallCompatibleAddress - Return the immediate to use if the specified 32-bit value is representable ...
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, bool HasDirectMove, bool HasP8Vector)
Do we have an efficient pattern in a .td file for this node?
static void setUsesTOCBasePtr(MachineFunction &MF)
static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG, const SDLoc &dl, const PPCSubtarget &Subtarget)
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, SelectionDAG &DAG)
static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB)
static bool isFPExtLoad(SDValue Op)
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, const SDLoc &dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID.
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, const SDLoc &dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static cl::opt< bool > UseAbsoluteJumpTables("ppc-use-absolute-jumptables", cl::desc("use absolute jump tables on ppc"), cl::Hidden)
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
Return true if we should reference labels using a PICBase, set the HiOpFlags and LoOpFlags to the tar...
static void buildCallOperands(SmallVectorImpl< SDValue > &Ops, PPCTargetLowering::CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG, SmallVector< std::pair< unsigned, SDValue >, 8 > &RegsToPass, SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff, const PPCSubtarget &Subtarget)
static cl::opt< bool > DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden)
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST)
Returns true if we should use a direct load into vector instruction (such as lxsd or lfd),...
static cl::opt< bool > DisableSCO("disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"), cl::Hidden)
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget)
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG)
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, const SDLoc &dl)
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
static SDValue isScalarToVec(SDValue Op)
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl)
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getVectorCompareInfo - Given an intrinsic, return false if it is not a vector comparison.
static unsigned invertFMAOpcode(unsigned Opc)
static Instruction * callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id)
static const SDValue * getNormalLoadInput(const SDValue &Op, bool &IsPermuted)
static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG, const PPCSubtarget &Subtarget, SDValue Chain=SDValue())
static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget)
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, const SDLoc &dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG)
static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, SelectionDAG &DAG, SDValue ArgValue, MVT LocVT, const SDLoc &dl)
static const TargetRegisterClass * getRegClassForSVT(MVT::SimpleValueType SVT, bool IsPPC64)
cl::opt< bool > ANDIGlueBug
static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart)
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
static unsigned getPPCStrictOpcode(unsigned Opc)
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, SDValue CallSeqStart, const CallBase *CB, const SDLoc &dl, bool hasNest, const PPCSubtarget &Subtarget)
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width)
static bool isSplatBV(SDValue Op)
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG)
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int)
Check that the mask is shuffling N byte elements.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
static bool isValidPCRelNode(SDValue N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSplat(ArrayRef< Value * > VL)
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
This defines the Use class.
static Optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static bool is64Bit(const char *name)
Class for arbitrary precision integers.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
This class represents an incoming formal argument to a Function.
StringRef getValueAsString() const
Return the attribute's value as a string.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
static BranchProbability getOne()
static BranchProbability getZero()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getBaseObject() const
StringRef getSection() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
const BasicBlock * getParent() const
bool hasAtomicLoad() const
Return true if this atomic instruction loads from memory.
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
This is an important class for using LLVM in a threaded context.
Base class for LoadSDNode and StoreSDNode.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const std::vector< LoopT * > & getSubLoops() const
Return the loops contained entirely within this loop.
unsigned getLoopDepth() const
Return the nesting level of this loop.
block_iterator block_end() const
block_iterator block_begin() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
static mvt_range fixedlen_vector_valuetypes()
uint64_t getScalarSizeInBits() const
@ INVALID_SIMPLE_VALUE_TYPE
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static mvt_range integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static mvt_range fp_valuetypes()
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
A description of a memory reference used in the backend.
uint64_t getSize() const
Return the size in bytes of the memory reference.
Flags
Flags values. These may be or'd together.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual register.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
const SDValue & getBasePtr() const
A Module instance is used to store all the information related to an LLVM module.
unsigned getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
unsigned getFramePointerSaveOffset() const
getFramePointerSaveOffset - Return the previous frame offset to save the frame pointer.
unsigned getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
PPCFunctionInfo - This class is derived from MachineFunction and contains private PowerPC target-specific informat...
void setVarArgsNumFPR(unsigned Num)
void setReturnAddrSaveIndex(int idx)
int getReturnAddrSaveIndex() const
unsigned getVarArgsNumFPR() const
int getFramePointerSaveIndex() const
void setVarArgsNumGPR(unsigned Num)
void appendParameterType(ParamType Type)
int getVarArgsFrameIndex() const
void setLRStoreRequired()
void setTailCallSPDelta(int size)
void setMinReservedArea(unsigned size)
unsigned getVarArgsNumGPR() const
unsigned getMinReservedArea() const
void setVarArgsStackOffset(int Offset)
void setVarArgsFrameIndex(int Index)
void addLiveInAttr(Register VReg, ISD::ArgFlagsTy Flags)
This function associates attributes for each live-in virtual register.
int getVarArgsStackOffset() const
void setFramePointerSaveIndex(int Idx)
bool useLongCalls() const
bool is32BitELFABI() const
unsigned descriptorTOCAnchorOffset() const
bool useSoftFloat() const
bool use64BitRegs() const
use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit registers in 32-bit mode when...
bool allowsUnalignedFPAccess() const
const PPCFrameLowering * getFrameLowering() const override
bool needsSwapsForVSXMemOps() const
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
bool needsTwoConstNR() const
bool isUsingPCRelativeCalls() const
bool usesFunctionDescriptors() const
True if the ABI is descriptor based.
MCRegister getEnvironmentPointerRegister() const
const PPCInstrInfo * getInstrInfo() const override
bool useCRBits() const
useCRBits - Return true if we should store and manipulate i1 values in the individual condition regis...
bool hasRecipPrec() const
bool hasInvariantFunctionDescriptors() const
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
POPCNTDKind hasPOPCNTD() const
bool hasPrefixInstrs() const
bool hasPartwordAtomics() const
bool isLittleEndian() const
bool isTargetLinux() const
bool hasP9Altivec() const
MCRegister getTOCPointerRegister() const
MCRegister getStackPointerRegister() const
bool has64BitSupport() const
has64BitSupport - Return true if the selected CPU supports 64-bit instructions, regardless of whether...
bool is64BitELFABI() const
bool pairedVectorMemops() const
const PPCTargetMachine & getTargetMachine() const
bool isPredictableSelectIsExpensive() const
bool enableMachineScheduler() const override
Scheduling customization.
const PPCRegisterInfo * getRegisterInfo() const override
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned descriptorEnvironmentPointerOffset() const
bool hasDirectMove() const
bool hasP8Altivec() const
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
unsigned getStackProbeSize(MachineFunction &MF) const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
MachineBasicBlock * emitProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign EncodingAlignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified address, force it to be represented as an indexed [r+...
bool useSoftFloat() const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always benefits from combining into FMA for a given value type.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool isProfitableToHoist(Instruction *I) const override
isProfitableToHoist - Check if it is profitable to hoist instruction I to its dominator block.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align=1, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, bool *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation.
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool hasInlineStackProbe(MachineFunction &MF) const override
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, MaybeAlign EncodingAlignment=None) const
SelectAddressRegReg - Given the specified address, check to see if it can be more efficiently repre...
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
Similar to the 16-bit case but for instructions that take a 34-bit displacement field (prefixed loads...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isJumpTableRelative() const override
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool SelectAddressPCRel(SDValue N, SDValue &Base) const
SelectAddressPCRel - Represent the specified address as pc relative to be represented as [pc+imm].
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressEVXRegReg - Given the specified address, check to see if it can be more efficiently re...
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool isAccessedAsGotIndirect(SDValue N) const
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
Common code between 32-bit and 64-bit PowerPC targets.
Wrapper class representing virtual and physical registers.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
const SDValue & getOperand(unsigned Num) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const SDNodeFlags getFlags() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
static SectionKind getMetadata()
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0)
Test whether V has a splatted value for all the demanded elements.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset is a class to represent an offset with 2 dimensions, named fixed and scalable,...
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
std::enable_if_t< std::numeric_limits< T >::is_signed, bool > getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
LLVM_NODISCARD size_t size() const
size - Get the string size.
LLVM_NODISCARD const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isJumpTableRelative() const
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
NegatibleCost
Enum that specifies when a float negation is beneficial.
std::vector< ArgListEntry > ArgListTy
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
virtual MCSymbol * getFunctionEntryPointSymbol(const GlobalValue *Func, const TargetMachine &TM) const
If supported, return the function entry point symbol.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
CodeModel::Model getCodeModel() const
Returns the code model.
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
unsigned NoSignedZerosFPMath
NoSignedZerosFPMath - This flag is enabled when the -enable-no-signed-zeros-fp-math is specified on t...
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static TypeSize Fixed(ScalarTy MinVal)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
@ FP128TyID
128-bit floating point type (112-bit significand)
static Type * getVoidTy(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Implementation for an ilist node.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Fast - This calling convention attempts to make calls as fast as possible (e.g.
@ C
C - The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ FLT_ROUNDS_
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ BR
Control flow instructions. These all have token chains.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ MO_GOT_TPREL_PCREL_FLAG
MO_GOT_TPREL_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_PCREL_FLAG
MO_PCREL_FLAG - If this bit is set, the symbol reference is relative to the current instruction addre...
@ MO_GOT_FLAG
MO_GOT_FLAG - If this bit is set the symbol reference is to be computed via the GOT.
@ MO_PLT
On a symbol operand "FOO", this indicates that the reference is actually to "FOO@plt".
@ MO_TPREL_FLAG
MO_TPREL_FLAG - If this bit is set the symbol reference is relative to TLS Initial Exec model.
@ MO_LO
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
@ MO_GOT_TLSLD_PCREL_FLAG
MO_GOT_TLSLD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_GOT_TLSGD_PCREL_FLAG
MO_GOT_TLSGD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_PIC_FLAG
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase,...
@ FCTIDUZ
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
@ ADDI_TLSGD_L_ADDR
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
@ FSQRT
Square root instruction.
@ STRICT_FCFID
Constrained integer-to-floating-point conversion instructions.
@ DYNALLOC
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
@ COND_BRANCH
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
@ VABSD
An SDNode for Power9 vector absolute value difference.
@ STORE_VEC_BE
CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
@ BDNZ
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
@ MTVSRZ
Direct move from a GPR to a VSX register (zero)
@ SRL
These nodes represent PPC shifts.
@ VECINSERT
VECINSERT - The PPC vector insert instruction.
@ LXSIZX
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
@ RFEBB
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
@ FCTIDZ
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ GET_TLS_ADDR
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
@ FP_TO_UINT_IN_VSR
Floating-point-to-integer conversion instructions.
@ XXSPLTI32DX
XXSPLTI32DX - The PPC XXSPLTI32DX instruction.
@ ANDI_rec_1_EQ_BIT
i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after ex...
@ FRE
Reciprocal estimate instructions (unary FP ops).
@ ADDIS_GOT_TPREL_HA
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
@ CLRBHRB
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
@ SINT_VEC_TO_FP
Extract a subvector from signed integer vector and convert to FP.
@ EXTRACT_SPE
Extract SPE register component, second argument is high or low.
@ XXSWAPD
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
@ ADDI_TLSLD_L_ADDR
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
@ ATOMIC_CMP_SWAP_8
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
@ ST_VSR_SCAL_INT
Store scalar integers from VSR.
@ VCMP
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
@ BCTRL
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
@ BUILD_SPE64
BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and EXTRACT_ELEMENT but take f64 arguments in...
@ LFIWZX
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
@ SCALAR_TO_VECTOR_PERMUTED
PowerPC instructions that have SCALAR_TO_VECTOR semantics tend to place the value into the least sign...
@ EXTRACT_VSX_REG
EXTRACT_VSX_REG = Extract one of the underlying vsx registers of an accumulator or pair register.
@ STXSIX
STXSIX - The STXSI[bh]X instruction.
@ MAT_PCREL_ADDR
MAT_PCREL_ADDR = Materialize a PC Relative address.
@ MFOCRF
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
@ XXSPLT
XXSPLT - The PPC VSX splat instructions.
@ TOC_ENTRY
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
@ XXPERMDI
XXPERMDI - The PPC XXPERMDI instruction.
@ ADDIS_DTPREL_HA
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
@ ADD_TLS
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
@ MTVSRA
Direct move from a GPR to a VSX register (algebraic)
@ VADD_SPLAT
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
@ PPC32_GOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ ADDI_DTPREL_L
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
@ BCTRL_LOAD_TOC
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
@ PPC32_PICGOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ FCFID
FCFID - The FCFID instruction, taking an f64 operand and producing and f64 value containing the FP re...
@ CR6SET
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
@ LBRX
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
@ LD_VSX_LH
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
@ PROBED_ALLOCA
To avoid stack clash, allocation is performed by block and each block is probed.
@ XXMFACC
XXMFACC = This corresponds to the xxmfacc instruction.
@ ADDIS_TLSGD_HA
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
@ ACC_BUILD
ACC_BUILD = Build an accumulator register from 4 VSX registers.
@ GlobalBaseReg
The result of the mflr at function entry, used for PIC code.
@ LXVD2X
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
@ CALL
CALL - A direct function call.
@ MTCTR
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
@ TC_RETURN
TC_RETURN - A tail call return.
@ STFIWX
STFIWX - The STFIWX instruction.
@ LD_SPLAT
VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory instructions such as LXVDSX,...
@ VCMP_rec
RESVEC, OUTFLAG = VCMP_rec(LHS, RHS, OPC) - Represents one of the altivec VCMP*_rec instructions.
@ MFFS
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
@ PADDI_DTPREL
G8RC = PADDI_DTPREL x3, Symbol - For the pc-rel based local-dynamic TLS model, produces a PADDI8 inst...
@ BUILD_FP128
Direct move of 2 consecutive GPR to a VSX register.
@ VEXTS
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
@ TLS_LOCAL_EXEC_MAT_ADDR
TLS_LOCAL_EXEC_MAT_ADDR = Materialize an address for TLS global address when using local exec access ...
@ VPERM
VPERM - The PPC VPERM Instruction.
@ ADDIS_TLSLD_HA
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
@ XXSPLTI_SP_TO_DP
XXSPLTI_SP_TO_DP - The PPC VSX splat instructions for immediates for converting immediate single prec...
@ GET_TLSLD_ADDR
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
@ ADDI_TLSGD_L
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
@ DYNAREAOFFSET
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
@ PAIR_BUILD
PAIR_BUILD = Build a vector pair register from 2 VSX registers.
@ STRICT_FADDRTZ
Constrained floating point add in round-to-zero mode.
@ FTSQRT
Test instruction for software square root.
@ FP_EXTEND_HALF
FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or lower (IDX=1) half of v4f32 to v2f6...
@ RET_FLAG
Return with a flag operand, matched by 'blr'.
@ CMPB
The CMPB instruction (takes two operands of i32 or i64).
@ VECSHL
VECSHL - The PPC vector shift left instruction.
@ ADDI_TLSLD_L
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
@ FADDRTZ
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
@ XSMAXCDP
XSMAXCDP, XSMINCDP - C-type min/max instructions.
@ SRA_ADDZE
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2.
@ EXTSWSLI
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate.
@ STXVD2X
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
@ UINT_VEC_TO_FP
Extract a subvector from unsigned integer vector and convert to FP.
@ LXVRZX
LXVRZX - Load VSX Vector Rightmost and Zero Extend This node represents v1i128 BUILD_VECTOR of a zero...
@ MFBHRBE
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry.
@ FCFIDU
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
@ FSEL
FSEL - Traditional three-operand fsel node.
@ SWAP_NO_CHAIN
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain.
@ LOAD_VEC_BE
VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
@ LFIWAX
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
@ STBRX
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction.
@ LD_GOT_TPREL_L
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
@ MFVSR
Direct move from a VSX register to a GPR.
@ TLS_DYNAMIC_MAT_PCREL_ADDR
TLS_DYNAMIC_MAT_PCREL_ADDR = Materialize a PC Relative address for TLS global address when using dyna...
@ Hi
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
@ XTY_ER
External reference.
This class represents lattice values for constants.
static bool isIndirectCall(const MachineInstr &MI)
constexpr bool isUInt< 16 >(uint64_t x)
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
bool CC_PPC32_SVR4_ByVal(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate,...
bool CC_PPC32_SVR4_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr bool isInt< 16 >(int64_t x)
uint32_t FloatToBits(float Float)
This function takes a float and returns the bit equivalent 32-bit integer.
unsigned M1(unsigned Val)
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
uint64_t PowerOf2Floor(uint64_t A)
Returns the power of two which is less than or equal to the given value.
bool RetCC_PPC_Cold(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool convertToNonDenormSingle(APInt &ArgAPInt)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool CC_PPC32_SVR4(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Mod
The access may modify the value stored in memory.
bool isIntS34Immediate(SDNode *N, int64_t &Imm)
isIntS34Immediate - This method tests if value of node given can be accurately represented as a sign ...
@ Z
zlib style compression
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the given range.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
unsigned M0(unsigned Val)
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr unsigned BitWidth
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static const fltSemantics & IEEEsingle() LLVM_READNONE
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & PPCDoubleDouble() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Represent subnormal handling kind for floating point instruction inputs and outputs.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
void resetAll()
Resets the known state of all bits.
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR-level reference.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Structure that collects some common arguments that get passed around between the functions for call lowering.
These are IR-level optimization flags that may be propagated to SDNodes.
void setNoFPExcept(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals